Compare commits
v1.11.1 ... erikj/debu (453 commits)
(The commit table for this comparison lists 453 commits by SHA1 only, from 6b2d6fdd33 through 2b37eabca1; the author, message, and date columns were empty in this capture.)
````diff
@@ -6,12 +6,7 @@
 set -ex
 
 apt-get update
-apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev
-
-# workaround for https://github.com/jaraco/zipp/issues/40
-python3.5 -m pip install 'setuptools>=34.4.0'
-
-python3.5 -m pip install tox
+apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox
 
 export LANG="C.UTF-8"
````
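For readability, here is the post-change body of this script as reconstructed from the hunk above (only the lines the hunk shows; anything outside its range is omitted):

```sh
set -ex

apt-get update
# tox is now installed via apt together with the other build dependencies,
# replacing the previous per-package pip installs.
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox

export LANG="C.UTF-8"
```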
````diff
@@ -5,8 +5,6 @@ Message history can be paginated
 Can re-join room if re-invited
 
-/upgrade creates a new room
-
 The only membership state included in an initial sync is for all the senders in the timeline
 
 Local device key changes get to remote servers
````
**CHANGES.md** (367 changed lines)

@@ -1,3 +1,370 @@

Synapse 1.13.0rc2 (2020-05-14)
==============================

Bugfixes
--------

- Fix a long-standing bug which could cause messages not to be sent over federation, when state events with state keys matching user IDs (such as custom user statuses) were received. ([\#7376](https://github.com/matrix-org/synapse/issues/7376))
- Restore compatibility with non-compliant clients during the user interactive authentication process, fixing a problem introduced in v1.13.0rc1. ([\#7483](https://github.com/matrix-org/synapse/issues/7483))

Internal Changes
----------------

- Fix linting errors in new version of Flake8. ([\#7470](https://github.com/matrix-org/synapse/issues/7470))
Synapse 1.13.0rc1 (2020-05-11)
==============================

This release brings some potential changes necessary for certain
configurations of Synapse:

* If your Synapse is configured to use SSO and has a custom
  `sso_redirect_confirm_template_dir` configuration option set, you will need
  to duplicate the new `sso_auth_confirm.html`, `sso_auth_success.html` and
  `sso_account_deactivated.html` templates into that directory.
* Synapse plugins using the `complete_sso_login` method of
  `synapse.module_api.ModuleApi` should instead switch to the async/await
  version, `complete_sso_login_async`, which includes additional checks. The
  former version is now deprecated.
* A bug was introduced in Synapse 1.4.0 which could cause the room directory
  to be incomplete or empty if Synapse was upgraded directly from v1.2.1 or
  earlier, to versions between v1.4.0 and v1.12.x.

Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes
and for general upgrade guidance.

Features
--------

- Extend the `web_client_location` option to accept an absolute URL to use as a redirect. Adds a warning when running the web client on the same hostname as the homeserver. Contributed by Martin Milata. ([\#7006](https://github.com/matrix-org/synapse/issues/7006))
- Set `Referrer-Policy` header to `no-referrer` on media downloads. ([\#7009](https://github.com/matrix-org/synapse/issues/7009))
- Add support for running replication over Redis when using workers. ([\#7040](https://github.com/matrix-org/synapse/issues/7040), [\#7325](https://github.com/matrix-org/synapse/issues/7325), [\#7352](https://github.com/matrix-org/synapse/issues/7352), [\#7401](https://github.com/matrix-org/synapse/issues/7401), [\#7427](https://github.com/matrix-org/synapse/issues/7427), [\#7439](https://github.com/matrix-org/synapse/issues/7439), [\#7446](https://github.com/matrix-org/synapse/issues/7446), [\#7450](https://github.com/matrix-org/synapse/issues/7450), [\#7454](https://github.com/matrix-org/synapse/issues/7454))
- Admin API `POST /_synapse/admin/v1/join/<roomIdOrAlias>` to join users to a room, in the same way `auto_join_rooms` does on user creation (see the sketch after this list). ([\#7051](https://github.com/matrix-org/synapse/issues/7051))
- Add options to prevent users from changing their profile or associated 3PIDs. ([\#7096](https://github.com/matrix-org/synapse/issues/7096))
- Support SSO in the user interactive authentication workflow. ([\#7102](https://github.com/matrix-org/synapse/issues/7102), [\#7186](https://github.com/matrix-org/synapse/issues/7186), [\#7279](https://github.com/matrix-org/synapse/issues/7279), [\#7343](https://github.com/matrix-org/synapse/issues/7343))
- Allow server admins to define and enforce a password policy ([MSC2000](https://github.com/matrix-org/matrix-doc/issues/2000)). ([\#7118](https://github.com/matrix-org/synapse/issues/7118))
- Improve the support for SSO authentication on the login fallback page. ([\#7152](https://github.com/matrix-org/synapse/issues/7152), [\#7235](https://github.com/matrix-org/synapse/issues/7235))
- Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set. ([\#7153](https://github.com/matrix-org/synapse/issues/7153))
- Admin users are no longer required to be in a room to create an alias for it. ([\#7191](https://github.com/matrix-org/synapse/issues/7191))
- Require admin privileges to enable room encryption by default. This does not affect existing rooms. ([\#7230](https://github.com/matrix-org/synapse/issues/7230))
- Add a config option for specifying the value of the Accept-Language HTTP header when generating URL previews. ([\#7265](https://github.com/matrix-org/synapse/issues/7265))
- Allow `/requestToken` endpoints to hide the existence (or lack thereof) of 3PID associations on the homeserver. ([\#7315](https://github.com/matrix-org/synapse/issues/7315))
- Add a configuration setting to tweak the threshold for dummy events. ([\#7422](https://github.com/matrix-org/synapse/issues/7422))
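(Editorial aside: a sketch of the admin join API listed above. The server name, room alias, and token are placeholders; the JSON body with `user_id` follows the Synapse admin API documentation, but treat the exact shape as indicative.)

```sh
# Joins @alice:example.com to #announcements:example.com (all values are placeholders).
curl -X POST \
  -H "Authorization: Bearer <admin_access_token>" \
  -H "Content-Type: application/json" \
  -d '{"user_id": "@alice:example.com"}' \
  'https://synapse.example.com/_synapse/admin/v1/join/%23announcements:example.com'
```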
Bugfixes
--------

- Don't attempt to use an invalid sqlite config if no database configuration is provided. Contributed by @nekatak. ([\#6573](https://github.com/matrix-org/synapse/issues/6573))
- Fix single-sign on with CAS systems: pass the same service URL when requesting the CAS ticket and when calling the `proxyValidate` URL. Contributed by @Naugrimm. ([\#6634](https://github.com/matrix-org/synapse/issues/6634))
- Fix missing field `default` when fetching user-defined push rules. ([\#6639](https://github.com/matrix-org/synapse/issues/6639))
- Improve error responses when accessing remote public room lists. ([\#6899](https://github.com/matrix-org/synapse/issues/6899), [\#7368](https://github.com/matrix-org/synapse/issues/7368))
- Transfer alias mappings on room upgrade. ([\#6946](https://github.com/matrix-org/synapse/issues/6946))
- Ensure that a user interactive authentication session is tied to a single request. ([\#7068](https://github.com/matrix-org/synapse/issues/7068), [\#7455](https://github.com/matrix-org/synapse/issues/7455))
- Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors. ([\#7089](https://github.com/matrix-org/synapse/issues/7089))
- Return the proper error (`M_BAD_ALIAS`) when a non-existent canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109))
- Fix a bug which meant that groups updates were not correctly replicated between workers. ([\#7117](https://github.com/matrix-org/synapse/issues/7117))
- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133))
- Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param. ([\#7150](https://github.com/matrix-org/synapse/issues/7150))
- Fix error page being shown when a custom SAML handler attempted to redirect when processing an auth response. ([\#7151](https://github.com/matrix-org/synapse/issues/7151))
- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155))
- Fix excessive CPU usage by `prune_old_outbound_device_pokes` job. ([\#7159](https://github.com/matrix-org/synapse/issues/7159))
- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. ([\#7177](https://github.com/matrix-org/synapse/issues/7177))
- Fix a bug which could cause an incorrect 'cyclic dependency' error. ([\#7178](https://github.com/matrix-org/synapse/issues/7178))
- Fix a bug that could cause a user to be invited to a server notices (aka System Alerts) room without any notice being sent. ([\#7199](https://github.com/matrix-org/synapse/issues/7199))
- Fix some worker-mode replication handling not being correctly recorded in CPU usage stats. ([\#7203](https://github.com/matrix-org/synapse/issues/7203))
- Do not allow a deactivated user to log in via SSO. ([\#7240](https://github.com/matrix-org/synapse/issues/7240), [\#7259](https://github.com/matrix-org/synapse/issues/7259))
- Fix the `--help` command-line argument. ([\#7249](https://github.com/matrix-org/synapse/issues/7249))
- Fix room publish permissions not being checked on room creation. ([\#7260](https://github.com/matrix-org/synapse/issues/7260))
- Reject unknown session IDs during user interactive authentication instead of silently creating a new session. ([\#7268](https://github.com/matrix-org/synapse/issues/7268))
- Fix a SQL query introduced in Synapse 1.12.0 which could cause large amounts of logging to the postgres slow-query log. ([\#7274](https://github.com/matrix-org/synapse/issues/7274))
- Persist user interactive authentication sessions across workers and Synapse restarts. ([\#7302](https://github.com/matrix-org/synapse/issues/7302))
- Fixed backwards compatibility logic of the first value of `trusted_third_party_id_servers` being used for `account_threepid_delegates.email`, which occurs when the former, deprecated option is set and the latter is not. ([\#7316](https://github.com/matrix-org/synapse/issues/7316))
- Fix a bug where event updates might not be sent over replication to worker processes after the stream falls behind. ([\#7337](https://github.com/matrix-org/synapse/issues/7337), [\#7358](https://github.com/matrix-org/synapse/issues/7358))
- Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map. ([\#7341](https://github.com/matrix-org/synapse/issues/7341))
- Fix incorrect metrics reporting for `renew_attestations` background task. ([\#7344](https://github.com/matrix-org/synapse/issues/7344))
- Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included. ([\#7367](https://github.com/matrix-org/synapse/issues/7367))
- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](UPGRADE.rst#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
- Fix bug in `EventContext.deserialize`. ([\#7393](https://github.com/matrix-org/synapse/issues/7393))
Improved Documentation
----------------------

- Update Debian installation instructions to recommend installing the `virtualenv` package instead of `python3-virtualenv`. ([\#6892](https://github.com/matrix-org/synapse/issues/6892))
- Improve the documentation for database configuration. ([\#6988](https://github.com/matrix-org/synapse/issues/6988))
- Improve the documentation of application service configuration files. ([\#7091](https://github.com/matrix-org/synapse/issues/7091))
- Update pre-built package name for FreeBSD. ([\#7107](https://github.com/matrix-org/synapse/issues/7107))
- Update postgres docs with login troubleshooting information. ([\#7119](https://github.com/matrix-org/synapse/issues/7119))
- Clean up INSTALL.md a bit. ([\#7141](https://github.com/matrix-org/synapse/issues/7141))
- Add documentation for running a local CAS server for testing. ([\#7147](https://github.com/matrix-org/synapse/issues/7147))
- Improve README.md by being explicit about public IP recommendation for TURN relaying. ([\#7167](https://github.com/matrix-org/synapse/issues/7167))
- Fix a small typo in the `metrics_flags` config option. ([\#7171](https://github.com/matrix-org/synapse/issues/7171))
- Update the contributed documentation on managing synapse workers with systemd, and bring it into the core distribution. ([\#7234](https://github.com/matrix-org/synapse/issues/7234))
- Add documentation to the `password_providers` config option. Add known password provider implementations to docs. ([\#7238](https://github.com/matrix-org/synapse/issues/7238), [\#7248](https://github.com/matrix-org/synapse/issues/7248))
- Modify suggested nginx reverse proxy configuration to match Synapse's default file upload size. Contributed by @ProCycleDev. ([\#7251](https://github.com/matrix-org/synapse/issues/7251))
- Documentation of media_storage_providers options updated to avoid misunderstandings. Contributed by Tristan Lins. ([\#7272](https://github.com/matrix-org/synapse/issues/7272))
- Add documentation on monitoring workers with Prometheus. ([\#7357](https://github.com/matrix-org/synapse/issues/7357))
- Clarify endpoint usage in the users admin api documentation. ([\#7361](https://github.com/matrix-org/synapse/issues/7361))
Deprecations and Removals
-------------------------

- Remove nonfunctional `captcha_bypass_secret` option from `homeserver.yaml`. ([\#7137](https://github.com/matrix-org/synapse/issues/7137))
Internal Changes
----------------

- Add benchmarks for LruCache. ([\#6446](https://github.com/matrix-org/synapse/issues/6446))
- Return total number of users and profile attributes in admin users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6881](https://github.com/matrix-org/synapse/issues/6881))
- Change device list streams to have one row per ID. ([\#7010](https://github.com/matrix-org/synapse/issues/7010))
- Remove concept of a non-limited stream. ([\#7011](https://github.com/matrix-org/synapse/issues/7011))
- Move catchup of replication streams logic to worker. ([\#7024](https://github.com/matrix-org/synapse/issues/7024), [\#7195](https://github.com/matrix-org/synapse/issues/7195), [\#7226](https://github.com/matrix-org/synapse/issues/7226), [\#7239](https://github.com/matrix-org/synapse/issues/7239), [\#7286](https://github.com/matrix-org/synapse/issues/7286), [\#7290](https://github.com/matrix-org/synapse/issues/7290), [\#7318](https://github.com/matrix-org/synapse/issues/7318), [\#7326](https://github.com/matrix-org/synapse/issues/7326), [\#7378](https://github.com/matrix-org/synapse/issues/7378), [\#7421](https://github.com/matrix-org/synapse/issues/7421))
- Convert some of synapse.rest.media to async/await. ([\#7110](https://github.com/matrix-org/synapse/issues/7110), [\#7184](https://github.com/matrix-org/synapse/issues/7184), [\#7241](https://github.com/matrix-org/synapse/issues/7241))
- De-duplicate / remove unused REST code for login and auth. ([\#7115](https://github.com/matrix-org/synapse/issues/7115))
- Convert `*StreamRow` classes to inner classes. ([\#7116](https://github.com/matrix-org/synapse/issues/7116))
- Clean up some LoggingContext code. ([\#7120](https://github.com/matrix-org/synapse/issues/7120), [\#7181](https://github.com/matrix-org/synapse/issues/7181), [\#7183](https://github.com/matrix-org/synapse/issues/7183), [\#7408](https://github.com/matrix-org/synapse/issues/7408), [\#7426](https://github.com/matrix-org/synapse/issues/7426))
- Add explicit `instance_id` for USER_SYNC commands and remove implicit `conn_id` usage. ([\#7128](https://github.com/matrix-org/synapse/issues/7128))
- Refactored the CAS authentication logic to a separate class. ([\#7136](https://github.com/matrix-org/synapse/issues/7136))
- Run replication streamers on workers. ([\#7146](https://github.com/matrix-org/synapse/issues/7146))
- Add tests for outbound device pokes. ([\#7157](https://github.com/matrix-org/synapse/issues/7157))
- Fix device list update stream ids going backward. ([\#7158](https://github.com/matrix-org/synapse/issues/7158))
- Use `stream.current_token()` and remove `stream_positions()`. ([\#7172](https://github.com/matrix-org/synapse/issues/7172))
- Move client command handling out of TCP protocol. ([\#7185](https://github.com/matrix-org/synapse/issues/7185))
- Move server command handling out of TCP protocol. ([\#7187](https://github.com/matrix-org/synapse/issues/7187))
- Fix consistency of HTTP status codes reported in log lines. ([\#7188](https://github.com/matrix-org/synapse/issues/7188))
- Only run one background database update at a time. ([\#7190](https://github.com/matrix-org/synapse/issues/7190))
- Remove sent outbound device list pokes from the database. ([\#7192](https://github.com/matrix-org/synapse/issues/7192))
- Add a background database update job to clear out duplicate `device_lists_outbound_pokes`. ([\#7193](https://github.com/matrix-org/synapse/issues/7193))
- Remove some extraneous debugging log lines. ([\#7207](https://github.com/matrix-org/synapse/issues/7207))
- Add explicit Python build tooling as dependencies for the snapcraft build. ([\#7213](https://github.com/matrix-org/synapse/issues/7213))
- Add typing information to federation server code. ([\#7219](https://github.com/matrix-org/synapse/issues/7219))
- Extend room admin api (`GET /_synapse/admin/v1/rooms`) with additional attributes. ([\#7225](https://github.com/matrix-org/synapse/issues/7225))
- Unblacklist '/upgrade creates a new room' sytest for workers. ([\#7228](https://github.com/matrix-org/synapse/issues/7228))
- Remove redundant checks on `daemonize` from synctl. ([\#7233](https://github.com/matrix-org/synapse/issues/7233))
- Upgrade jQuery to v3.4.1 on fallback login/registration pages. ([\#7236](https://github.com/matrix-org/synapse/issues/7236))
- Change log line that told user to implement onLogin/onRegister fallback js functions to a warning, instead of an info, so it's more visible. ([\#7237](https://github.com/matrix-org/synapse/issues/7237))
- Correct the parameters of a test fixture. Contributed by Isaiah Singletary. ([\#7243](https://github.com/matrix-org/synapse/issues/7243))
- Convert auth handler to async/await. ([\#7261](https://github.com/matrix-org/synapse/issues/7261))
- Add some unit tests for replication. ([\#7278](https://github.com/matrix-org/synapse/issues/7278))
- Improve typing annotations in `synapse.replication.tcp.streams.Stream`. ([\#7291](https://github.com/matrix-org/synapse/issues/7291))
- Reduce log verbosity of url cache cleanup tasks. ([\#7295](https://github.com/matrix-org/synapse/issues/7295))
- Fix sample SAML Service Provider configuration. Contributed by @frcl. ([\#7300](https://github.com/matrix-org/synapse/issues/7300))
- Fix StreamChangeCache to work with multiple entities changing on the same stream id. ([\#7303](https://github.com/matrix-org/synapse/issues/7303))
- Fix an incorrect import in IdentityHandler. ([\#7319](https://github.com/matrix-org/synapse/issues/7319))
- Reduce logging verbosity for successful federation requests. ([\#7321](https://github.com/matrix-org/synapse/issues/7321))
- Convert some federation handler code to async/await. ([\#7338](https://github.com/matrix-org/synapse/issues/7338))
- Fix collation for postgres for unit tests. ([\#7359](https://github.com/matrix-org/synapse/issues/7359))
- Convert RegistrationWorkerStore.is_server_admin and dependent code to async/await. ([\#7363](https://github.com/matrix-org/synapse/issues/7363))
- Add an `instance_name` to `RDATA` and `POSITION` replication commands. ([\#7364](https://github.com/matrix-org/synapse/issues/7364))
- Thread through instance name to replication client. ([\#7369](https://github.com/matrix-org/synapse/issues/7369))
- Convert synapse.server_notices to async/await. ([\#7394](https://github.com/matrix-org/synapse/issues/7394))
- Convert synapse.notifier to async/await. ([\#7395](https://github.com/matrix-org/synapse/issues/7395))
- Fix issues with the Python package manifest. ([\#7404](https://github.com/matrix-org/synapse/issues/7404))
- Prevent methods in `synapse.handlers.auth` from polling the homeserver config every request. ([\#7420](https://github.com/matrix-org/synapse/issues/7420))
- Speed up fetching device lists changes when handling `/sync` requests. ([\#7423](https://github.com/matrix-org/synapse/issues/7423))
- Run group attestation renewal in series rather than parallel for performance. ([\#7442](https://github.com/matrix-org/synapse/issues/7442))
Synapse 1.12.4 (2020-04-23)
===========================

No significant changes.


Synapse 1.12.4rc1 (2020-04-22)
==============================

Features
--------

- Always send users their own device updates. ([\#7160](https://github.com/matrix-org/synapse/issues/7160))
- Add support for handling GET requests for `account_data` on a worker. ([\#7311](https://github.com/matrix-org/synapse/issues/7311))


Bugfixes
--------

- Fix a bug that prevented cross-signing with users on worker-mode synapses. ([\#7255](https://github.com/matrix-org/synapse/issues/7255))
- Do not treat display names as globs in push rules. ([\#7271](https://github.com/matrix-org/synapse/issues/7271))
- Fix a bug with cross-signing devices belonging to remote users who did not share a room with any user on the local homeserver. ([\#7289](https://github.com/matrix-org/synapse/issues/7289))
Synapse 1.12.3 (2020-04-03)
===========================

- Remove the pin to Pillow 7.0 which was introduced in Synapse 1.12.2, and
  correctly fix the issue with building the Debian packages. ([\#7212](https://github.com/matrix-org/synapse/issues/7212))

Synapse 1.12.2 (2020-04-02)
===========================

This release works around [an issue](https://github.com/matrix-org/synapse/issues/7208) with building the debian packages.

No other significant changes since 1.12.1.

Synapse 1.12.1 (2020-04-02)
===========================

No significant changes since 1.12.1rc1.


Synapse 1.12.1rc1 (2020-03-31)
==============================

Bugfixes
--------

- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)). Introduced in v1.12.0.
- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155)). Introduced in v1.12.0rc1.
- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. ([\#7177](https://github.com/matrix-org/synapse/issues/7177)). Introduced in v1.11.0.
Synapse 1.12.0 (2020-03-23)
===========================

Debian packages and Docker images are rebuilt using the latest versions of
dependency libraries, including Twisted 20.3.0. **Please see security advisory
below**.

Potential slow database update during upgrade
---------------------------------------------

Synapse 1.12.0 includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place. For information on seeing if you are affected, and a workaround if you
are, see the [upgrade notes](UPGRADE.rst#upgrading-to-v1120).

Security advisory
-----------------

Synapse may be vulnerable to request-smuggling attacks when it is used with a
reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are
described in
[CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108)
and
[CVE-2020-10109](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10109).
For a good introduction to this class of request-smuggling attacks, see
https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.

We are not aware of these vulnerabilities being exploited in the wild, and
do not believe that they are exploitable with current versions of any reverse
proxies. Nevertheless, we recommend that all Synapse administrators ensure that
they have the latest versions of the Twisted library to ensure that their
installation remains secure.

* Administrators using the [`matrix.org` Docker
  image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
  packages from
  `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
  should ensure that they have version 1.12.0 installed: these images include
  Twisted 20.3.0.
* Administrators who have [installed Synapse from
  source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
  should upgrade Twisted within their virtualenv by running:
  ```sh
  <path_to_virtualenv>/bin/pip install 'Twisted>=20.3.0'
  ```
* Administrators who have installed Synapse from distribution packages should
  consult the information from their distributions.

The `matrix.org` Synapse instance was not vulnerable to these vulnerabilities.
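(Editorial aside: after running the upgrade command above, the installed Twisted version can be verified from the same virtualenv. `twisted.__version__` is a standard Twisted attribute; the path placeholder mirrors the advisory's.)

```sh
# Prints the Twisted version installed in the virtualenv; expect 20.3.0 or later.
<path_to_virtualenv>/bin/python -c "import twisted; print(twisted.__version__)"
```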
Advance notice of change to the default `git` branch for Synapse
----------------------------------------------------------------

Currently, the default `git` branch for Synapse is `master`, which tracks the
latest release.

After the release of Synapse 1.13.0, we intend to change this default to
`develop`, which is the development tip. This is more consistent with common
practice and modern `git` usage.

Although we try to keep `develop` in a stable state, there may be occasions
where regressions creep in. Developers and distributors who have scripts which
run builds using the default branch of `Synapse` should therefore consider
pinning their scripts to `master`.
Synapse 1.12.0rc1 (2020-03-19)
==============================

Features
--------

- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)):
  - Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. ([\#6965](https://github.com/matrix-org/synapse/issues/6965))
  - Validate the `alt_aliases` property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971))
  - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986))
  - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037))
  - Stop sending m.room.aliases events during room creation and upgrade. ([\#6941](https://github.com/matrix-org/synapse/issues/6941))
  - Synapse no longer uses room alias events to calculate room names for push notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966))
  - The room list endpoint no longer returns a list of aliases. ([\#6970](https://github.com/matrix-org/synapse/issues/6970))
  - Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. ([\#7034](https://github.com/matrix-org/synapse/issues/7034))
- Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. ([\#6315](https://github.com/matrix-org/synapse/issues/6315))
- Check that server_name is correctly set before running database updates. ([\#6982](https://github.com/matrix-org/synapse/issues/6982))
- Break down monthly active users by `appservice_id` and emit via Prometheus. ([\#7030](https://github.com/matrix-org/synapse/issues/7030))
- Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. ([\#7058](https://github.com/matrix-org/synapse/issues/7058), [\#7067](https://github.com/matrix-org/synapse/issues/7067))
- Add an optional parameter to control whether other sessions are logged out when a user's password is modified. ([\#7085](https://github.com/matrix-org/synapse/issues/7085))
- Add prometheus metrics for the number of active pushers. ([\#7103](https://github.com/matrix-org/synapse/issues/7103), [\#7106](https://github.com/matrix-org/synapse/issues/7106))
- Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. ([\#7094](https://github.com/matrix-org/synapse/issues/7094))


Bugfixes
--------

- When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. ([\#6572](https://github.com/matrix-org/synapse/issues/6572))
- Fix a couple of bugs in email configuration handling. ([\#6962](https://github.com/matrix-org/synapse/issues/6962))
- Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. ([\#6967](https://github.com/matrix-org/synapse/issues/6967))
- Fix `duplicate key` error which was logged when rejoining a room over federation. ([\#6968](https://github.com/matrix-org/synapse/issues/6968))
- Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. ([\#6990](https://github.com/matrix-org/synapse/issues/6990))
- Fix py35-old CI by using native tox package. ([\#7018](https://github.com/matrix-org/synapse/issues/7018))
- Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. ([\#7035](https://github.com/matrix-org/synapse/issues/7035))
- Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. ([\#7044](https://github.com/matrix-org/synapse/issues/7044))
- Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. ([\#7066](https://github.com/matrix-org/synapse/issues/7066))
- Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. ([\#7070](https://github.com/matrix-org/synapse/issues/7070))
- Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. ([\#7074](https://github.com/matrix-org/synapse/issues/7074))
Improved Documentation
----------------------

- Updated CentOS8 install instructions. Contributed by Richard Kellner. ([\#6925](https://github.com/matrix-org/synapse/issues/6925))
- Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. ([\#6984](https://github.com/matrix-org/synapse/issues/6984))
- Change date in [INSTALL.md](./INSTALL.md#tls-certificates) for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015))
- Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. ([\#7048](https://github.com/matrix-org/synapse/issues/7048))
Deprecations and Removals
-------------------------

- Remove the unused query_auth federation endpoint per [MSC2451](https://github.com/matrix-org/matrix-doc/pull/2451). ([\#7026](https://github.com/matrix-org/synapse/issues/7026))
Internal Changes
----------------

- Add type hints to `logging/context.py`. ([\#6309](https://github.com/matrix-org/synapse/issues/6309))
- Add some clarifications to `README.md` in the database schema directory. ([\#6615](https://github.com/matrix-org/synapse/issues/6615))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
- Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
- Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
- Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
- Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
- Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
- Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. ([\#6979](https://github.com/matrix-org/synapse/issues/6979))
- Update warning for incorrect database collation/ctype to include link to documentation. ([\#6985](https://github.com/matrix-org/synapse/issues/6985))
- Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987))
- Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019))
- Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995))
- Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020))
- Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045))
- Add type annotations and comments to the auth handler. ([\#7063](https://github.com/matrix-org/synapse/issues/7063))


Synapse 1.11.1 (2020-03-03)
===========================
````diff
@@ -60,7 +60,7 @@ python 3.6 and to install each tool:
 
 ```
 # Install the dependencies
-pip install -U black flake8 isort
+pip install -U black flake8 flake8-comprehensions isort
 
 # Run the linter script
 ./scripts-dev/lint.sh
````
**INSTALL.md** (121 changed lines)

````diff
@@ -2,7 +2,6 @@
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
     - [Platform-Specific Instructions](#platform-specific-instructions)
-    - [Troubleshooting Installation](#troubleshooting-installation)
   - [Prebuilt packages](#prebuilt-packages)
 - [Setting up Synapse](#setting-up-synapse)
   - [TLS certificates](#tls-certificates)
@@ -10,6 +9,7 @@
   - [Registering a user](#registering-a-user)
   - [Setting up a TURN server](#setting-up-a-turn-server)
   - [URL previews](#url-previews)
+- [Troubleshooting Installation](#troubleshooting-installation)
 
 # Choosing your server name
````
````diff
@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5, 3.6, 3.7 or 3.8.
+- Python 3.5.2 or later, up to Python 3.8.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 Synapse is written in Python but some of the libraries it uses are written in
````
````diff
@@ -70,7 +70,7 @@ pip install -U matrix-synapse
 ```
 
 Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before)::
+file. To do this, run (in your virtualenv, as before):
 
 ```
 cd ~/synapse
@@ -84,22 +84,24 @@ python -m synapse.app.homeserver \
 ... substituting an appropriate value for `--server-name`.
 
 This command will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
+also generate a set of keys for you. These keys will allow your homeserver to
+identify itself to other homeservers, so don't lose or delete them. It would be
 wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
+change your homeserver's keys, you may find that other homeservers have the
 old key cached. If you update the signing key, you should change the name of the
 key in the `<server name>.signing.key` file (the second word) to something
 different. See the
 [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
-for more information on key management.)
+for more information on key management).
 
 To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. `~/synapse`), and::
+run (e.g. `~/synapse`), and:
 
-    cd ~/synapse
-    source env/bin/activate
-    synctl start
+```
+cd ~/synapse
+source env/bin/activate
+synctl start
+```
````
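(Editorial aside: the `@@ -84` hunk header above truncates the config-generation command it anchors on. In INSTALL.md of this period the full command is approximately the following; treat the exact flags as indicative rather than authoritative.)

```sh
cd ~/synapse
source env/bin/activate
# Generates homeserver.yaml and signing keys; substitute your own server name.
python -m synapse.app.homeserver \
    --server-name my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=[yes|no]
```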
### Platform-Specific Instructions

````diff
@@ -110,7 +112,7 @@ Installing prerequisites on Ubuntu or Debian:
 ```
 sudo apt-get install build-essential python3-dev libffi-dev \
                      python3-pip python3-setuptools sqlite3 \
-                     libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
+                     libssl-dev virtualenv libjpeg-dev libxslt1-dev
 ```
````
#### ArchLinux

````diff
@@ -124,12 +126,21 @@ sudo pacman -S base-devel python python-pip \
 
 #### CentOS/Fedora
 
-Installing prerequisites on CentOS 7 or Fedora 25:
+Installing prerequisites on CentOS 8 or Fedora>26:
 
 ```
+sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
+                 libwebp-devel tk-devel redhat-rpm-config \
+                 python3-virtualenv libffi-devel openssl-devel
+sudo dnf groupinstall "Development Tools"
+```
+
+Installing prerequisites on CentOS 7 or Fedora<=25:
+
+```
 sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                  lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
-                 python-virtualenv libffi-devel openssl-devel
+                 python3-virtualenv libffi-devel openssl-devel
 sudo yum groupinstall "Development Tools"
 ```
````
````diff
@@ -179,7 +190,7 @@ doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
 There is currently no port for OpenBSD. Additionally, OpenBSD's security
 settings require a slightly more difficult installation process.
 
-XXX: I suspect this is out of date.
+(XXX: I suspect this is out of date)
 
 1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
    new user called `_synapse` and set that directory as the new user's home.
@@ -187,7 +198,7 @@ XXX: I suspect this is out of date.
    write and execute permissions on the same memory space to be run from
    `/usr/local`.
 2. `su` to the new `_synapse` user and change to their home directory.
-3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
+3. Create a new virtualenv: `virtualenv -p python3 ~/.synapse`
 4. Source the virtualenv configuration located at
    `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
    using the `.` command, rather than `bash`'s `source`.
````
````diff
@@ -208,45 +219,6 @@ be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
 Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
 for Windows Server.
 
-### Troubleshooting Installation
-
-XXX a bunch of this is no longer relevant.
-
-Synapse requires pip 8 or later, so if your OS provides too old a version you
-may need to manually upgrade it::
-
-    sudo pip install --upgrade pip
-
-Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`.
-You can fix this by manually upgrading pip and virtualenv::
-
-    sudo pip install --upgrade virtualenv
-
-You can next rerun `virtualenv -p python3 synapse` to update the virtual env.
-
-Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`
-You can fix this by manually installing ndg-httpsclient::
-
-    pip install --upgrade ndg-httpsclient
-
-Installing may fail with `mock requires setuptools>=17.1. Aborting installation`.
-You can fix this by upgrading setuptools::
-
-    pip install --upgrade setuptools
-
-If pip crashes mid-installation for reason (e.g. lost terminal), pip may
-refuse to run until you remove the temporary installation directory it
-created. To reset the installation::
-
-    rm -rf /tmp/pip_install_matrix
-
-pip seems to leak *lots* of memory during installation. For instance, a Linux
-host with 512MB of RAM may run out of memory whilst installing Twisted. If this
-happens, you will have to individually install the dependencies which are
-failing, e.g.::
-
-    pip install twisted
-
 ## Prebuilt packages
 
 As an alternative to installing from source, prebuilt packages are available
````
````diff
@@ -305,7 +277,7 @@ For `buster` and `sid`, Synapse is available in the Debian repositories and
 it should be possible to install it with simply:
 
 ```
-    sudo apt install matrix-synapse
+sudo apt install matrix-synapse
 ```
 
 There is also a version of `matrix-synapse` in `stretch-backports`. Please see
@@ -366,15 +338,17 @@ sudo pip install py-bcrypt
 
 Synapse can be found in the void repositories as 'synapse':
 
-    xbps-install -Su
-    xbps-install -S synapse
+```
+xbps-install -Su
+xbps-install -S synapse
+```
 
 ### FreeBSD
 
 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
 
 - Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
-- Packages: `pkg install py27-matrix-synapse`
+- Packages: `pkg install py37-matrix-synapse`
 
 
 ### NixOS
````
````diff
@@ -411,6 +385,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
   resources:
     - names: [client, federation]
 ```
+
 * You will also need to uncomment the `tls_certificate_path` and
   `tls_private_key_path` lines under the `TLS` section. You can either
   point these settings at an existing certificate and key, or you can
@@ -418,15 +393,15 @@ so, you will need to edit `homeserver.yaml`, as follows:
   for having Synapse automatically provision and renew federation
   certificates through ACME can be found at [ACME.md](docs/ACME.md).
   Note that, as pointed out in that document, this feature will not
-  work with installs set up after November 2020.
+  work with installs set up after November 2019.
 
 If you are using your own certificate, be sure to use a `.pem` file that
 includes the full certificate chain including any intermediate certificates
 (for instance, if using certbot, use `fullchain.pem` as your certificate, not
 `cert.pem`).
 
 For a more detailed guide to configuring your server for federation, see
-[federate.md](docs/federate.md)
+[federate.md](docs/federate.md).
````
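(Editorial aside: combining the listener fragment and the TLS options discussed in the hunks above, the relevant `homeserver.yaml` shape is roughly as follows. The port and certificate paths are illustrative assumptions, not values from the diff.)

```yaml
listeners:
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]

# Use the full certificate chain, not cert.pem (paths assume a certbot layout).
tls_certificate_path: "/etc/letsencrypt/live/example.com/fullchain.pem"
tls_private_key_path: "/etc/letsencrypt/live/example.com/privkey.pem"
```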
## Email

````diff
@@ -473,7 +448,7 @@ on your server even if `enable_registration` is `false`.
 ## Setting up a TURN server
 
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server.  See [docs/turn-howto.md](docs/turn-howto.md) for details.
+a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
````
## URL previews

@@ -482,10 +457,24 @@ turn it on you must enable the `url_preview_enabled: True` config parameter
and explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
-spidering 'internal' URLs on your network. At the very least we recommend that
+spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.

-This also requires the optional lxml and netaddr python dependencies to be
-installed. This in turn requires the libxml2 library to be available - on
+This also requires the optional `lxml` and `netaddr` python dependencies to be
+installed. This in turn requires the `libxml2` library to be available - on
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
your OS.

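As an illustration, a minimal sketch of those two settings in `homeserver.yaml`; the ranges below are a starting point covering loopback and RFC1918 space, not an exhaustive blacklist:

```
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'     # loopback
  - '10.0.0.0/8'      # RFC1918
  - '172.16.0.0/12'   # RFC1918
  - '192.168.0.0/16'  # RFC1918
```
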
# Troubleshooting Installation

`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:

```
pip install twisted
```

If you have any other problems, feel free to ask in
[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).

MANIFEST.in (11 changes)

@@ -30,23 +30,24 @@ recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude Dockerfile
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh

include pyproject.toml
recursive-include changelog.d *

prune .buildkite
prune .circleci
prune .codecov.yml
prune .coveragerc
prune .github
prune contrib
prune debian
prune demo/etc
prune docker
prune mypy.ini
prune snap
prune stubs

UPGRADE.rst (133 changes)

@@ -75,6 +75,139 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

Upgrading to v1.13.0
====================

Incorrect database migration in old synapse versions
----------------------------------------------------

A bug was introduced in Synapse 1.4.0 which could cause the room directory to
be incomplete or empty if Synapse was upgraded directly from v1.2.1 or
earlier, to versions between v1.4.0 and v1.12.x.

This will *not* be a problem for Synapse installations which were:

* created at v1.4.0 or later,
* upgraded via v1.3.x, or
* upgraded straight from v1.2.1 or earlier to v1.13.0 or later.

If completeness of the room directory is a concern, installations which are
affected can be repaired as follows:

1. Run the following SQL from a `psql` or `sqlite3` console:

   .. code:: sql

      INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
          ('populate_stats_process_rooms', '{}', 'current_state_events_membership');

      INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
          ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');

2. Restart synapse.

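To confirm the repair has finished, you can watch the background update queue drain; a minimal check, assuming direct database access:

.. code:: sql

   -- the repair is complete once neither 'populate_stats_process_rooms'
   -- nor 'populate_stats_process_users' appears in the output
   SELECT update_name FROM background_updates;
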
New Single Sign-on HTML Templates
---------------------------------

New templates (``sso_auth_confirm.html``, ``sso_auth_success.html``, and
``sso_account_deactivated.html``) were added to Synapse. If your Synapse is
configured to use SSO and a custom ``sso_redirect_confirm_template_dir``
configuration then these templates will need to be copied from
`synapse/res/templates <synapse/res/templates>`_ into that directory.

Synapse SSO Plugins Method Deprecation
--------------------------------------

Plugins using the ``complete_sso_login`` method of
``synapse.module_api.ModuleApi`` should update to using the async/await
version ``complete_sso_login_async`` which includes additional checks. The
non-async version is considered deprecated.

Rolling back to v1.12.4 after a failed upgrade
----------------------------------------------

v1.13.0 includes a lot of large changes. If something problematic occurs, you
may want to roll back to a previous version of Synapse. Because v1.13.0 also
includes a new database schema version, reverting that version is also required
alongside the generic rollback instructions mentioned above. In short, to roll
back to v1.12.4 you need to:

1. Stop the server
2. Decrease the schema version in the database:

   .. code:: sql

      UPDATE schema_version SET version = 57;

3. Downgrade Synapse by following the instructions for your installation method
   in the "Rolling back to older versions" section above.


Upgrading to v1.12.0
====================

This version includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place.

This is only likely to be a problem in the case of a server which is
participating in many rooms.

0. As with all upgrades, it is recommended that you have a recent backup of
   your database which can be used for recovery in the event of any problems.

1. As an initial check to see if you will be affected, you can try running the
   following query from the `psql` or `sqlite3` console. It is safe to run it
   while Synapse is still running.

   .. code:: sql

      SELECT MAX(q.v) FROM (
        SELECT (
          SELECT ej.json AS v
          FROM state_events se INNER JOIN event_json ej USING (event_id)
          WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
          LIMIT 1
        ) FROM rooms WHERE rooms.room_version IS NULL
      ) q;

   This query will take about the same amount of time as the upgrade process: i.e.,
   if it takes 5 minutes, then it is likely that Synapse will be unresponsive for
   5 minutes during the upgrade.

   If you consider an outage of this duration to be acceptable, no further
   action is necessary and you can simply start Synapse 1.12.0.

   If you would prefer to reduce the downtime, continue with the steps below.

2. The easiest workaround for this issue is to manually
   create a new index before upgrading. On PostgreSQL, this can be done as follows:

   .. code:: sql

      CREATE INDEX CONCURRENTLY tmp_upgrade_1_12_0_index
      ON state_events(room_id) WHERE type = 'm.room.create';

   The above query may take some time, but is also safe to run while Synapse is
   running.

   We assume that no SQLite users have databases large enough to be
   affected. If you *are* affected, you can run a similar query, omitting the
   ``CONCURRENTLY`` keyword. Note however that this operation may in itself cause
   Synapse to stop running for some time. Synapse admins are reminded that
   `SQLite is not recommended for use outside a test
   environment <https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql>`_.

3. Once the index has been created, the ``SELECT`` query in step 1 above should
   complete quickly. It is therefore safe to upgrade to Synapse 1.12.0.

4. Once Synapse 1.12.0 has successfully started and is responding to HTTP
   requests, the temporary index can be removed:

   .. code:: sql

      DROP INDEX tmp_upgrade_1_12_0_index;

Upgrading to v1.10.0
====================

changelog.d (28 new files, one changelog entry per file):

changelog.d/6391.feature: Synapse's cache factor can now be configured in `homeserver.yaml` by the `caches.global_factor` setting. Additionally, `caches.per_cache_factors` controls the cache factors for individual caches.
changelog.d/7256.feature: Add OpenID Connect login/registration support. Contributed by Quentin Gliech, on behalf of [les Connecteurs](https://connecteu.rs).
changelog.d/7281.misc: Add MultiWriterIdGenerator to support multiple concurrent writers of streams.
changelog.d/7317.feature: Add room details admin endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
changelog.d/7374.misc: Move catchup of replication streams logic to worker.
changelog.d/7382.misc: Add typing annotations in `synapse.federation`.
changelog.d/7396.misc: Convert the room handler to async/await.
changelog.d/7398.docker: Update docker runtime image to Alpine v3.11. Contributed by @Starbix.
changelog.d/7428.misc: Improve performance of `get_e2e_cross_signing_key`.
changelog.d/7429.misc: Improve performance of `mark_as_sent_devices_by_remote`.
changelog.d/7435.feature: Allow for using more than one spam checker module at once.
changelog.d/7436.misc: Support any process writing to cache invalidation stream.
changelog.d/7440.misc: Refactor event persistence database functions in preparation for allowing them to be run on non-master processes.
changelog.d/7445.misc: Add type hints to the SAML handler.
changelog.d/7448.misc: Remove storage method `get_hosts_in_room` that is no longer called anywhere.
changelog.d/7449.misc: Fix some typos in the notice_expiry templates.
changelog.d/7458.doc: Update information about mapping providers for SAML and OpenID.
changelog.d/7459.misc: Convert the federation handler to async/await.
changelog.d/7460.misc: Convert the search handler to async/await.
changelog.d/7470.misc: Fix linting errors in new version of Flake8.
changelog.d/7475.misc: Have all instances correctly respond to the REPLICATE command.
changelog.d/7477.doc: Fix copy-paste error in `ServerNoticesConfig` docstring. Contributed by @ptman.
changelog.d/7482.bugfix: Fix Redis reconnection logic that can result in missed updates over replication if master reconnects to Redis without restarting.
changelog.d/7490.misc: Clean up replication unit tests.
changelog.d/7491.misc: Move event stream handling out of slave store.
changelog.d/7492.misc: Allow censoring of events to happen on workers.
changelog.d/7493.misc: Move EventStream handling into default ReplicationDataHandler.
changelog.d/7495.feature: Add `instance_map` config and route replication calls.

docker-compose.yml

@@ -15,10 +15,9 @@ services:
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
-     - SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
+     - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
    volumes:
      # You may either store all the files in a local folder
-     - ./matrix-config/homeserver.yaml:/etc/homeserver.yaml
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data

@@ -58,7 +57,7 @@ services:
      - POSTGRES_PASSWORD=changeme
      # ensure the database gets created correctly
      # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
-     - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
+     - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data

contrib/grafana/README.md

@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
-1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
+1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules

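For step 1, a minimal Prometheus scrape job might look like the sketch below; the job name and target are assumptions, and Synapse must be configured with a metrics listener (commonly on port 9092):

```
scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["localhost:9092"]  # assumed address of the metrics listener
```
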
@@ -18,7 +18,7 @@
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"iteration": 1561447718159,
|
||||
"iteration": 1584612489167,
|
||||
"links": [
|
||||
{
|
||||
"asDropdown": true,
|
||||
@@ -34,6 +34,7 @@
|
||||
"panels": [
|
||||
{
|
||||
"collapsed": false,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -52,12 +53,14 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 1
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 75,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
@@ -72,7 +75,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -151,6 +156,7 @@
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
@@ -158,6 +164,7 @@
|
||||
"x": 12,
|
||||
"y": 1
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 33,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
@@ -172,7 +179,9 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -302,12 +311,14 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 0,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 10
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 107,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
@@ -322,7 +333,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -425,12 +438,14 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 0,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 19
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 118,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
@@ -445,7 +460,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -542,6 +559,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -1361,6 +1379,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -1732,6 +1751,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -2439,6 +2459,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -2635,6 +2656,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -2650,11 +2672,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 61
|
||||
"y": 33
|
||||
},
|
||||
"id": 79,
|
||||
"legend": {
|
||||
@@ -2670,6 +2693,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -2684,8 +2710,13 @@
|
||||
"expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "txn rate",
|
||||
"legendFormat": "successful txn rate",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
|
||||
"legendFormat": "failed txn rate",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
@@ -2736,11 +2767,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 61
|
||||
"y": 33
|
||||
},
|
||||
"id": 83,
|
||||
"legend": {
|
||||
@@ -2756,6 +2788,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -2829,11 +2864,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 70
|
||||
"y": 42
|
||||
},
|
||||
"id": 109,
|
||||
"legend": {
|
||||
@@ -2849,6 +2885,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -2923,11 +2962,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 70
|
||||
"y": 42
|
||||
},
|
||||
"id": 111,
|
||||
"legend": {
|
||||
@@ -2943,6 +2983,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3009,6 +3052,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -3024,12 +3068,14 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 62
|
||||
"y": 34
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 51,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
@@ -3044,6 +3090,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3112,6 +3161,95 @@
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"description": "",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 34
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 134,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"hideZero": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})",
|
||||
"legendFormat": "{{kind}} {{app_id}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Active pusher instances by app",
|
||||
"tooltip": {
|
||||
"shared": false,
|
||||
"sort": 2,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"repeat": null,
|
||||
@@ -3120,6 +3258,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -3523,6 +3662,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -3540,6 +3680,7 @@
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
@@ -3562,6 +3703,9 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3630,6 +3774,7 @@
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
@@ -3652,6 +3797,9 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3720,6 +3868,7 @@
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
@@ -3742,6 +3891,9 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3810,6 +3962,7 @@
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
@@ -3832,6 +3985,9 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -3921,6 +4077,7 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -4010,6 +4167,7 @@
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -4076,6 +4234,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -4540,6 +4699,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -5060,6 +5220,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -5079,7 +5240,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 67
|
||||
"y": 39
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
@@ -5095,6 +5256,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5198,7 +5360,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 67
|
||||
"y": 39
|
||||
},
|
||||
"id": 41,
|
||||
"legend": {
|
||||
@@ -5214,6 +5376,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5286,7 +5449,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 74
|
||||
"y": 46
|
||||
},
|
||||
"id": 42,
|
||||
"legend": {
|
||||
@@ -5302,6 +5465,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5373,7 +5537,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 74
|
||||
"y": 46
|
||||
},
|
||||
"id": 43,
|
||||
"legend": {
|
||||
@@ -5389,6 +5553,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5460,7 +5625,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 81
|
||||
"y": 53
|
||||
},
|
||||
"id": 113,
|
||||
"legend": {
|
||||
@@ -5476,6 +5641,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5546,7 +5712,7 @@
|
||||
"h": 7,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 81
|
||||
"y": 53
|
||||
},
|
||||
"id": 115,
|
||||
"legend": {
|
||||
@@ -5562,6 +5728,7 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5573,7 +5740,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(synapse_replication_tcp_protocol_close_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}-{{index}} {{reason_type}}",
|
||||
@@ -5628,6 +5795,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -5643,11 +5811,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 13
|
||||
"y": 40
|
||||
},
|
||||
"id": 67,
|
||||
"legend": {
|
||||
@@ -5663,7 +5832,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5679,7 +5850,7 @@
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}-{{index}} ",
|
||||
"legendFormat": "{{job}}-{{index}} {{name}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
@@ -5731,11 +5902,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 13
|
||||
"y": 40
|
||||
},
|
||||
"id": 71,
|
||||
"legend": {
|
||||
@@ -5751,7 +5923,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5819,11 +5993,12 @@
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 22
|
||||
"y": 49
|
||||
},
|
||||
"id": 121,
|
||||
"interval": "",
|
||||
@@ -5840,7 +6015,9 @@
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"options": {},
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"paceLength": 10,
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
@@ -5909,6 +6086,7 @@
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
@@ -6607,7 +6785,7 @@
|
||||
}
|
||||
],
|
||||
"refresh": "5m",
|
||||
"schemaVersion": 18,
|
||||
"schemaVersion": 22,
|
||||
"style": "dark",
|
||||
"tags": [
|
||||
"matrix"
|
||||
@@ -6616,7 +6794,7 @@
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"tags": [],
|
||||
"selected": true,
|
||||
"text": "Prometheus",
|
||||
"value": "Prometheus"
|
||||
},
|
||||
@@ -6638,6 +6816,7 @@
|
||||
"auto_count": 100,
|
||||
"auto_min": "30s",
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "auto",
|
||||
"value": "$__auto_interval_bucket_size"
|
||||
},
|
||||
@@ -6719,9 +6898,9 @@
|
||||
"allFormat": "regex wildcard",
|
||||
"allValue": "",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"text": "synapse",
|
||||
"value": [
|
||||
"$__all"
|
||||
"synapse"
|
||||
]
|
||||
},
|
||||
"datasource": "$datasource",
|
||||
@@ -6751,7 +6930,9 @@
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
"value": [
|
||||
"$__all"
|
||||
]
|
||||
},
|
||||
"datasource": "$datasource",
|
||||
"definition": "",
|
||||
@@ -6810,5 +6991,5 @@
|
||||
"timezone": "",
|
||||
"title": "Synapse",
|
||||
"uid": "000000012",
|
||||
"version": 10
|
||||
"version": 19
|
||||
}
|
||||
contrib/systemd-with-workers/README.md

@@ -1,150 +1,2 @@
# Setup Synapse with Workers and Systemd

This is a setup for managing synapse with systemd, including support for
managing workers. It provides a `matrix-synapse` service, as well as a
`matrix-synapse-worker@` service for any workers you require. Additionally,
to group the required services, it sets up a `matrix.target`. You can use
this to automatically start any bot or bridge services. More on this in
[Bots and Bridges](#bots-and-bridges).

See the folder [system](system) for the service and target files.

The folder [workers](workers) contains an example configuration for the
`federation_reader` worker. Pay special attention to the name of the
configuration file. In order to work with the `matrix-synapse-worker@.service`
service, it needs to have the exact same name as the worker app.

This setup expects neither the homeserver nor any workers to fork. Forking is
handled by systemd.

## Setup

1. Adjust your matrix configs. Make sure that the worker config files have the
exact same name as the worker app. Compare `matrix-synapse-worker@.service` for
why. You can find an example worker config in the [workers](workers) folder. See
below for relevant settings in the `homeserver.yaml`.
2. Copy the `*.service` and `*.target` files in [system](system) to
`/etc/systemd/system`.
3. `systemctl enable matrix-synapse.service`; this adds the homeserver
app to the `matrix.target`.
4. *Optional.* `systemctl enable
matrix-synapse-worker@federation_reader.service`; this adds the federation_reader
app to the `matrix-synapse.service`.
5. *Optional.* Repeat step 4 for any additional workers you require.
6. *Optional.* Add any bots or bridges by enabling them.
7. Start all matrix related services via `systemctl start matrix.target`.
8. *Optional.* Enable autostart of all matrix related services on system boot
via `systemctl enable matrix.target`.

## Usage

After you have set up, you can use the following commands to manage your synapse
installation:

```
# Start matrix-synapse, all workers and any enabled bots or bridges.
systemctl start matrix.target

# Restart matrix-synapse and all workers (not necessarily restarting bots
# or bridges, see "Bots and Bridges")
systemctl restart matrix-synapse.service

# Stop matrix-synapse and all workers (not necessarily stopping bots
# or bridges, see "Bots and Bridges")
systemctl stop matrix-synapse.service

# Restart a specific worker (e.g. federation_reader); the homeserver is
# unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service

# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.service
```

## The Configs

Make sure the `worker_app` is set in the `homeserver.yaml` and that it does not fork.

```
worker_app: synapse.app.homeserver
daemonize: false
```

None of the workers should fork, as forking is handled by systemd. Hence make
sure this is present in all worker config files.

```
worker_daemonize: false
```

The config files of all workers are expected to be located in
`/etc/matrix-synapse/workers`. If you want to use a different location you have
to edit the provided `*.service` files accordingly.

## Bots and Bridges

Most bots and bridges do not care if the homeserver goes down or is restarted.
Depending on the implementation this may crash them though. So look up the docs
or ask the community of the specific bridge or bot you want to run to make sure
you choose the correct setup.

Whichever configuration you choose, after the setup the following will enable
automatically starting (and potentially restarting) your bot/bridge with the
`matrix.target`.

```
systemctl enable <yourBotOrBridgeName>.service
```

**Note** that if synapse is inactive, the bots/bridges will only be started with
synapse if you start the `matrix.target`, not if you start the
`matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service`
as *just* synapse, and `matrix.target` as anything matrix related, including
synapse and any and all enabled bots and bridges.

### Start with synapse but ignore synapse going down

If the bridge can handle shutdowns of the homeserver you'll want to install the
service in the `matrix.target` and optionally add an
`After=matrix-synapse.service` dependency to have the bot/bridge start after
synapse when starting everything.

In this case the service file should look like this.

```
[Unit]
# ...
# Optional, this will only ensure that if you start everything, synapse will
# be started before the bot/bridge will be started.
After=matrix-synapse.service

[Service]
# ...

[Install]
WantedBy=matrix.target
```

### Stop/restart when synapse stops/restarts

If the bridge can't handle shutdowns of the homeserver you'll still want to
install the service in the `matrix.target` but also have to specify the
`After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service`
dependencies to have the bot/bridge stop/restart with synapse.

In this case the service file should look like this.

```
[Unit]
# ...
# Mandatory
After=matrix-synapse.service
BindsTo=matrix-synapse.service

[Service]
# ...

[Install]
WantedBy=matrix.target
```
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).

@@ -1,19 +0,0 @@
[Unit]
Description=Synapse Matrix Worker
After=matrix-synapse.service
BindsTo=matrix-synapse.service

[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse-%i

[Install]
WantedBy=matrix-synapse.service

@@ -1,7 +0,0 @@
[Unit]
Description=Contains matrix services like synapse, bridges and bots
After=network.target
AllowIsolate=no

[Install]
WantedBy=multi-user.target

debian/changelog (43 changes, vendored)

@@ -1,3 +1,46 @@
<<<<<<< HEAD
matrix-synapse-py3 (1.12.3ubuntu1) UNRELEASED; urgency=medium

  * Add information about .well-known files to Debian installation scripts.

 -- Patrick Cloke <patrickc@matrix.org>  Mon, 06 Apr 2020 10:10:38 -0400
=======
matrix-synapse-py3 (1.12.4) stable; urgency=medium

  * New synapse release 1.12.4.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 23 Apr 2020 10:58:14 -0400
>>>>>>> master

matrix-synapse-py3 (1.12.3) stable; urgency=medium

  [ Richard van der Hoff ]
  * Update the Debian build scripts to handle the new installation paths
    for the support libraries introduced by Pillow 7.1.1.

  [ Synapse Packaging team ]
  * New synapse release 1.12.3.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 03 Apr 2020 10:55:03 +0100

matrix-synapse-py3 (1.12.2) stable; urgency=medium

  * New synapse release 1.12.2.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 02 Apr 2020 19:02:17 +0000

matrix-synapse-py3 (1.12.1) stable; urgency=medium

  * New synapse release 1.12.1.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 02 Apr 2020 11:30:47 +0000

matrix-synapse-py3 (1.12.0) stable; urgency=medium

  * New synapse release 1.12.0.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 23 Mar 2020 12:13:03 +0000

matrix-synapse-py3 (1.11.1) stable; urgency=medium

  * New synapse release 1.11.1.

debian/po/templates.pot (13 changes, vendored)

@@ -1,14 +1,14 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
-# This file is distributed under the same license as the matrix-synapse package.
+# This file is distributed under the same license as the matrix-synapse-py3 package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: matrix-synapse\n"
-"Report-Msgid-Bugs-To: matrix-synapse@packages.debian.org\n"
-"POT-Creation-Date: 2017-02-21 07:51+0000\n"
+"Project-Id-Version: matrix-synapse-py3\n"
+"Report-Msgid-Bugs-To: matrix-synapse-py3@packages.debian.org\n"
+"POT-Creation-Date: 2020-04-06 16:39-0400\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"

@@ -28,7 +28,10 @@ msgstr ""
#: ../templates:1001
msgid ""
"The name that this homeserver will appear as, to clients and other servers "
-"via federation. This name should match the SRV record published in DNS."
+"via federation. This is normally the public hostname of the server running "
+"synapse, but can be different if you set up delegation. Please refer to the "
+"delegation documentation in this case: https://github.com/matrix-org/synapse/"
+"blob/master/docs/delegate.md."
msgstr ""

#. Type: boolean

debian/rules (33 changes, vendored)

@@ -15,17 +15,38 @@ override_dh_installinit:
# we don't really want to strip the symbols from our object files.
override_dh_strip:

+# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
+# (executables and shared libs) in the package, and looks for the shared
+# libraries that they depend on. It then adds a dependency on the package that
+# contains that library to the package.
+#
+# We make two modifications to that process...
+#
override_dh_shlibdeps:
-        # make the postgres package's dependencies a recommendation
-        # rather than a hard dependency.
+        # Firstly, postgres is not a hard dependency for us, so we want to make
+        # the things that psycopg2 depends on (such as libpq) be
+        # recommendations rather than hard dependencies. We do so by
+        # running dpkg-shlibdeps manually on psycopg2's libs.
+        #
        find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
                xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
                        -pshlibs1 -dRecommends

-        # all the other dependencies can be normal 'Depends' requirements,
-        # except for PIL's, which is self-contained and which confuses
-        # dpkg-shlibdeps.
-        dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
+        # secondly, we exclude PIL's libraries from the process. They are known
+        # to be self-contained, but they have interdependencies and
+        # dpkg-shlibdeps doesn't know how to resolve them.
+        #
+        # As of Pillow 7.1.0, these libraries are in
+        # site-packages/Pillow.libs. Previously, they were in
+        # site-packages/PIL/.libs.
+        #
+        # (we also need to exclude psycopg2, of course, since we've already
+        # dealt with that.)
+        #
+        dh_shlibdeps \
+                -X site-packages/PIL/.libs \
+                -X site-packages/Pillow.libs \
+                -X site-packages/psycopg2

override_dh_virtualenv:
        ./debian/build_virtualenv

debian/templates (6 changes, vendored)

@@ -2,8 +2,10 @@ Template: matrix-synapse/server-name
Type: string
_Description: Name of the server:
 The name that this homeserver will appear as, to clients and other
- servers via federation. This name should match the SRV record
- published in DNS.
+ servers via federation. This is normally the public hostname of the
+ server running synapse, but can be different if you set up delegation.
+ Please refer to the delegation documentation in this case:
+ https://github.com/matrix-org/synapse/blob/master/docs/delegate.md.

Template: matrix-synapse/report-stats
Type: boolean

docker/Dockerfile

@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###

-FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.11

# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \

docs/admin_api/room_membership.md (new file, 34 lines)

@@ -0,0 +1,34 @@
# Edit Room Membership API

This API allows an administrator to join a user account with a given `user_id`
to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.

## Parameters

The following parameters are available:

* `user_id` - Fully qualified user: for example, `@user:server.com`.
* `room_id_or_alias` - The room identifier or alias to join: for example,
  `!636q39766251:server.com`.

## Usage

```
POST /_synapse/admin/v1/join/<room_id_or_alias>

{
  "user_id": "@user:server.com"
}
```

Including an `access_token` of a server admin.

Response:

```
{
  "room_id": "!636q39766251:server.com"
}
```
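As an illustration, the same request issued with `curl`; the homeserver address is an assumption for a default local install, and note that the room ID must be URL-encoded:

```
curl -X POST \
  -H "Authorization: Bearer <access_token>" \
  -H "Content-Type: application/json" \
  -d '{"user_id": "@user:server.com"}' \
  "http://localhost:8008/_synapse/admin/v1/join/%21636q39766251%3Aserver.com"
```
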
docs/admin_api/rooms.md

@@ -11,8 +11,21 @@ The following query parameters are available:
* `from` - Offset in the returned list. Defaults to `0`.
* `limit` - Maximum amount of rooms to return. Defaults to `100`.
* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
-  - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default.
-  - `size` - Rooms are ordered by the number of members. Largest to smallest.
+  - `alphabetical` - Same as `name`. This is deprecated.
+  - `size` - Same as `joined_members`. This is deprecated.
+  - `name` - Rooms are ordered alphabetically by room name. This is the default.
+  - `canonical_alias` - Rooms are ordered alphabetically by main alias address of the room.
+  - `joined_members` - Rooms are ordered by the number of members. Largest to smallest.
+  - `joined_local_members` - Rooms are ordered by the number of local members. Largest to smallest.
+  - `version` - Rooms are ordered by room version. Largest to smallest.
+  - `creator` - Rooms are ordered alphabetically by creator of the room.
+  - `encryption` - Rooms are ordered alphabetically by the end-to-end encryption algorithm.
+  - `federatable` - Rooms are ordered by whether the room is federatable.
+  - `public` - Rooms are ordered by visibility in room list.
+  - `join_rules` - Rooms are ordered alphabetically by join rules of the room.
+  - `guest_access` - Rooms are ordered alphabetically by guest access option of the room.
+  - `history_visibility` - Rooms are ordered alphabetically by visibility of history of the room.
+  - `state_events` - Rooms are ordered by number of state events. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any

@@ -26,6 +39,16 @@ The following fields are possible in the JSON response body:
- `name` - The name of the room.
- `canonical_alias` - The canonical (main) alias address of the room.
- `joined_members` - How many users are currently in the room.
+- `joined_local_members` - How many local users are currently in the room.
+- `version` - The version of the room as a string.
+- `creator` - The `user_id` of the room creator.
+- `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
+- `federatable` - Whether users on other servers can join this room.
+- `public` - Whether the room is visible in room directory.
+- `join_rules` - The type of rules used for users wishing to join this room. One of: ["public", "knock", "invite", "private"].
+- `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
+- `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
+- `state_events` - Total number of state_events of a room. Complexity of the room.
* `offset` - The current pagination offset in rooms. This parameter should be
  used instead of `next_token` for room offset as `next_token` is
  not intended to be parsed.
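Putting the parameters together, a request for the ten largest rooms might look like this with `curl`; the local address and token are placeholders:

```
curl -H "Authorization: Bearer <access_token>" \
  "http://localhost:8008/_synapse/admin/v1/rooms?order_by=joined_members&limit=10"
```
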
@@ -60,14 +83,34 @@ Response:
      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
      "name": "Matrix HQ",
      "canonical_alias": "#matrix:matrix.org",
-     "joined_members": 8326
+     "joined_members": 8326,
+     "joined_local_members": 2,
+     "version": "1",
+     "creator": "@foo:matrix.org",
+     "encryption": null,
+     "federatable": true,
+     "public": true,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 93534
    },
    ... (8 hidden items) ...
    {
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
-     "joined_members": 314
+     "joined_members": 314,
+     "joined_local_members": 20,
+     "version": "4",
+     "creator": "@foo:matrix.org",
+     "encryption": "m.megolm.v1.aes-sha2",
+     "federatable": true,
+     "public": false,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 8345
    }
  ],
  "offset": 0,

@@ -92,7 +135,17 @@ Response:
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
-     "joined_members": 314
+     "joined_members": 314,
+     "joined_local_members": 20,
+     "version": "4",
+     "creator": "@foo:matrix.org",
+     "encryption": "m.megolm.v1.aes-sha2",
+     "federatable": true,
+     "public": false,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 8
    }
  ],
  "offset": 0,

@@ -117,14 +170,34 @@ Response:
      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
      "name": "Matrix HQ",
      "canonical_alias": "#matrix:matrix.org",
-     "joined_members": 8326
+     "joined_members": 8326,
+     "joined_local_members": 2,
+     "version": "1",
+     "creator": "@foo:matrix.org",
+     "encryption": null,
+     "federatable": true,
+     "public": true,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 93534
    },
    ... (98 hidden items) ...
    {
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
-     "joined_members": 314
+     "joined_members": 314,
+     "joined_local_members": 20,
+     "version": "4",
+     "creator": "@foo:matrix.org",
+     "encryption": "m.megolm.v1.aes-sha2",
+     "federatable": true,
+     "public": false,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 8345
    }
  ],
  "offset": 0,

@@ -154,6 +227,16 @@ Response:
      "name": "Music Theory",
      "canonical_alias": "#musictheory:matrix.org",
      "joined_members": 127,
+     "joined_local_members": 2,
+     "version": "1",
+     "creator": "@foo:matrix.org",
+     "encryption": null,
+     "federatable": true,
+     "public": true,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 93534
    },
    ... (48 hidden items) ...
    {

@@ -161,6 +244,16 @@ Response:
      "name": "weechat-matrix",
      "canonical_alias": "#weechat-matrix:termina.org.uk",
      "joined_members": 137,
+     "joined_local_members": 20,
+     "version": "4",
+     "creator": "@foo:termina.org.uk",
+     "encryption": null,
+     "federatable": true,
+     "public": true,
+     "join_rules": "invite",
+     "guest_access": null,
+     "history_visibility": "shared",
+     "state_events": 8345
    }
  ],
  "offset": 100,

@@ -171,3 +264,57 @@ Response:

Once the `next_token` parameter is no longer present, we know we've reached the
end of the list.

# DRAFT: Room Details API

The Room Details admin API allows server admins to get all details of a room.

This API is still a draft and details might change!

The following fields are possible in the JSON response body:

* `room_id` - The ID of the room.
* `name` - The name of the room.
* `canonical_alias` - The canonical (main) alias address of the room.
* `joined_members` - How many users are currently in the room.
* `joined_local_members` - How many local users are currently in the room.
* `version` - The version of the room as a string.
* `creator` - The `user_id` of the room creator.
* `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
* `federatable` - Whether users on other servers can join this room.
* `public` - Whether the room is visible in room directory.
* `join_rules` - The type of rules used for users wishing to join this room. One of: ["public", "knock", "invite", "private"].
* `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.

## Usage

A standard request:

```
GET /_synapse/admin/v1/rooms/<room_id>

{}
```

Response:

```
{
  "room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
  "name": "Music Theory",
  "canonical_alias": "#musictheory:matrix.org",
  "joined_members": 127,
  "joined_local_members": 2,
  "version": "1",
  "creator": "@foo:matrix.org",
  "encryption": null,
  "federatable": true,
  "public": true,
  "join_rules": "invite",
  "guest_access": null,
  "history_visibility": "shared",
  "state_events": 93534
}
```

@@ -33,11 +33,22 @@ with a body of:
|
||||
|
||||
including an ``access_token`` of a server admin.
|
||||
|
||||
The parameter ``displayname`` is optional and defaults to ``user_id``.
|
||||
The parameter ``threepids`` is optional.
|
||||
The parameter ``avatar_url`` is optional.
|
||||
The parameter ``admin`` is optional and defaults to 'false'.
|
||||
The parameter ``deactivated`` is optional and defaults to 'false'.
|
||||
The parameter ``displayname`` is optional and defaults to the value of
|
||||
``user_id``.
|
||||
|
||||
The parameter ``threepids`` is optional and allows setting the third-party IDs
|
||||
(email, msisdn) belonging to a user.
|
||||
|
||||
The parameter ``avatar_url`` is optional. Must be a [MXC
|
||||
URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
|
||||
|
||||
The parameter ``admin`` is optional and defaults to ``false``.
|
||||
|
||||
The parameter ``deactivated`` is optional and defaults to ``false``.
|
||||
|
||||
The parameter ``password`` is optional. If provided, the user's password is
|
||||
updated and all devices are logged out.
|
||||
|
||||
If the user already exists then optional parameters default to the current value.
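
A sketch of such a request from Python (this assumes the v2 endpoint
``PUT /_synapse/admin/v2/users/<user_id>`` that this section documents; the
base URL, token and field values are placeholders):

.. code:: python

    import requests  # assumed HTTP client; any client will do

    user_id = "@alice:example.com"
    resp = requests.put(
        f"https://homeserver.example.com/_synapse/admin/v2/users/{user_id}",
        headers={"Authorization": "Bearer <admin access token>"},
        json={
            "displayname": "Alice",       # optional, defaults to the user_id
            "admin": False,               # optional, defaults to false
            "password": "correct-horse",  # optional; logs out all devices if set
        },
    )
    resp.raise_for_status()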

List Accounts
@@ -50,16 +61,25 @@ The api is::

    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

including an ``access_token`` of a server admin.
The parameters ``from`` and ``limit`` are required only for pagination.
By default, a ``limit`` of 100 is used.
The parameter ``user_id`` can be used to select only users with user ids that
contain this value.
The parameter ``guests=false`` can be used to exclude guest users,
default is to include guest users.
The parameter ``deactivated=true`` can be used to include deactivated users,
default is to exclude deactivated users.
If the endpoint does not return a ``next_token`` then there are no more users left.
It returns a JSON body like the following:

The parameter ``from`` is optional but used for pagination, denoting the
offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token``
from a previous call.

The parameter ``limit`` is optional but is used for pagination, denoting the
maximum number of items to return in this call. Defaults to ``100``.

The parameter ``user_id`` is optional and filters to only users with user IDs
that contain this value.

The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
Defaults to ``true`` to include guest users.

The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
Defaults to ``false`` to exclude deactivated users.

A JSON body is returned with the following shape:

.. code:: json

@@ -71,19 +91,29 @@ It returns a JSON body like the following:
            "is_guest": 0,
            "admin": 0,
            "user_type": null,
            "deactivated": 0
            "deactivated": 0,
            "displayname": "<User One>",
            "avatar_url": null
        }, {
            "name": "<user_id2>",
            "password_hash": "<password_hash2>",
            "is_guest": 0,
            "admin": 1,
            "user_type": null,
            "deactivated": 0
            "deactivated": 0,
            "displayname": "<User Two>",
            "avatar_url": "<avatar_url>"
        }
        ],
        "next_token": "100"
        "next_token": "100",
        "total": 200
    }

To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.

If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.
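
A sketch of that pagination loop from Python (parameters as documented above;
the ``users`` response key, base URL and token are assumptions/placeholders):

.. code:: python

    import requests  # assumed HTTP client

    def fetch_all_users(base_url, token, include_deactivated=False):
        """Walk the paginated user list, passing ``from`` only as the opaque
        ``next_token`` value returned by the previous call."""
        users, from_token = [], None
        while True:
            params = {"limit": 100, "guests": "false"}
            if include_deactivated:
                params["deactivated"] = "true"
            if from_token is not None:
                params["from"] = from_token
            body = requests.get(
                f"{base_url}/_synapse/admin/v2/users",
                params=params,
                headers={"Authorization": f"Bearer {token}"},
            ).json()
            users.extend(body["users"])
            from_token = body.get("next_token")
            if from_token is None:  # no next_token: no more users left
                return users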

Query Account
=============

@@ -168,11 +198,14 @@ with a body of:

.. code:: json

    {
       "new_password": "<secret>"
       "new_password": "<secret>",
       "logout_devices": true
    }

including an ``access_token`` of a server admin.

The parameter ``new_password`` is required.
The parameter ``logout_devices`` is optional and defaults to ``true``.

Get whether a user is a server administrator or not
===================================================

@@ -23,9 +23,13 @@ namespaces:
  users:  # List of users we're interested in
    - exclusive: <bool>
      regex: <regex>
      group_id: <group>
    - ...
  aliases: []  # List of aliases we're interested in
  rooms: []  # List of room ids we're interested in
```

`exclusive`: If enabled, only this application service is allowed to register users in its namespace(s).
`group_id`: All users of this application service are dynamically joined to this group. This is useful for e.g. user organisation or flairs. A namespace match is illustrated below.
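
To illustrate how a user namespace is matched, here is a plain-Python check
against a hypothetical bridge prefix (the regex is invented for illustration):

```python
import re

# Hypothetical namespace regex, as it might appear under `users:` in a
# registration file:  regex: "@irc_.*:example\\.org"
pattern = re.compile(r"@irc_.*:example\.org")

print(bool(pattern.fullmatch("@irc_alice:example.org")))  # True: inside the namespace
print(bool(pattern.fullmatch("@alice:example.org")))      # False: outside it
```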

See the [spec](https://matrix.org/docs/spec/application_service/unstable.html) for further details on how application services work.

@@ -30,7 +30,7 @@ The necessary tools are detailed below.

Install `flake8` with:

    pip install --upgrade flake8
    pip install --upgrade flake8 flake8-comprehensions

Check all application and test code with:


64 docs/dev/cas.md Normal file
@@ -0,0 +1,64 @@
# How to test CAS as a developer without a server

The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an
easy-to-run CAS implementation built on top of Django.

## Prerequisites

1. Create a new virtualenv: `python3 -m venv <your virtualenv>`
2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate`
3. Install Django and django-mama-cas:
   ```
   python -m pip install "django<3" "django-mama-cas==2.4.0"
   ```
4. Create a Django project in the current directory:
   ```
   django-admin startproject cas_test .
   ```
5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas
6. Set up the SQLite database: `python manage.py migrate`
7. Create a user:
   ```
   python manage.py createsuperuser
   ```
   1. Use whatever you want as the username and password.
   2. Leave the other fields blank.
8. Use the built-in Django test server to serve the CAS endpoints on port 8000:
   ```
   python manage.py runserver
   ```

You should now have a Django project configured to serve CAS authentication with
a single user created.

## Configure Synapse (and Riot) to use CAS

1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
   running Django test server:
   ```yaml
   cas_config:
     enabled: true
     server_url: "http://localhost:8000"
     service_url: "http://localhost:8081"
     #displayname_attribute: name
     #required_attributes:
     #  name: value
   ```
2. Restart Synapse.

Note that the above configuration assumes the homeserver is running on port 8081
and that the CAS server is on port 8000, both on localhost.

## Testing the configuration

Then in Riot:

1. Visit the login page with a Riot pointing at your homeserver.
2. Click the Single Sign-On button.
3. Log in using the credentials created with `createsuperuser`.
4. You should be logged in.

If you want to repeat this process you'll need to manually log out first:

1. http://localhost:8000/admin/
2. Click "logout" in the top right.
175 docs/dev/oidc.md Normal file
@@ -0,0 +1,175 @@
# How to test OpenID Connect

Any OpenID Connect Provider (OP) should work with Synapse, as long as it supports the authorization code flow.
There are a few options for that:

- start a local OP. Synapse has been tested with [Hydra][hydra] and [Dex][dex-idp].
  Note that for an OP to work, it should be served under a secure (HTTPS) origin.
  A certificate signed with a self-signed, locally trusted CA should work. In that case, start Synapse with a `SSL_CERT_FILE` environment variable set to the path of the CA.
- use a publicly available OP. Synapse has been tested with [Google][google-idp].
- set up a SaaS OP, such as [Auth0][auth0] or [Okta][okta]. Auth0 has a free tier which has been tested with Synapse.

[google-idp]: https://developers.google.com/identity/protocols/OpenIDConnect#authenticatingtheuser
[auth0]: https://auth0.com/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[hydra]: https://www.ory.sh/docs/hydra/


## Sample configs

Here are a few configs for providers that should work with Synapse.

### [Dex][dex-idp]

[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
Although it is designed to help build a full-blown provider with an external database, it can be configured with static passwords in a config file.

Follow the [Getting Started guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md) to install Dex.

Edit the `examples/config-dev.yaml` config file from the Dex repo to add a client:

```yaml
staticClients:
- id: synapse
  secret: secret
  redirectURIs:
  - '[synapse base url]/_synapse/oidc/callback'
  name: 'Synapse'
```

Run with `dex serve examples/config-dev.yaml`.

Synapse config:

```yaml
oidc_config:
  enabled: true
  skip_verification: true # This is needed as Dex is served on an insecure endpoint
  issuer: "http://127.0.0.1:5556/dex"
  discover: true
  client_id: "synapse"
  client_secret: "secret"
  scopes:
    - openid
    - profile
  user_mapping_provider:
    config:
      localpart_template: '{{ user.name }}'
      display_name_template: '{{ user.name|capitalize }}'
```

### [Auth0][auth0]

1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse base url]/_synapse/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
   <details>
    <summary>Code sample</summary>

    ```js
    function addPersistenceAttribute(user, context, callback) {
      user.user_metadata = user.user_metadata || {};
      user.user_metadata.preferred_username = user.user_metadata.preferred_username || user.user_id;
      context.idToken.preferred_username = user.user_metadata.preferred_username;

      auth0.users.updateUserMetadata(user.user_id, user.user_metadata)
        .then(function(){
            callback(null, user, context);
        })
        .catch(function(err){
            callback(err);
        });
    }
    ```

   </details>


```yaml
oidc_config:
  enabled: true
  issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
  discover: true
  client_id: "your-client-id" # TO BE FILLED
  client_secret: "your-client-secret" # TO BE FILLED
  scopes:
    - openid
    - profile
  user_mapping_provider:
    config:
      localpart_template: '{{ user.preferred_username }}'
      display_name_template: '{{ user.name }}'
```

### GitHub

GitHub is a bit special as it is not an OpenID Connect compliant provider, but just a regular OAuth2 provider.
The `/user` API endpoint can be used to retrieve information about the user.
As the OIDC login mechanism needs an attribute to uniquely identify users and that endpoint does not return a `sub` property, an alternative `subject_claim` has to be set.

1. Create a new OAuth application: https://github.com/settings/applications/new
2. Set the callback URL to `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
  enabled: true
  issuer: "https://github.com/"
  discover: false
  client_id: "your-client-id" # TO BE FILLED
  client_secret: "your-client-secret" # TO BE FILLED
  authorization_endpoint: "https://github.com/login/oauth/authorize"
  token_endpoint: "https://github.com/login/oauth/access_token"
  userinfo_endpoint: "https://api.github.com/user"
  scopes:
    - read:user
  user_mapping_provider:
    config:
      subject_claim: 'id'
      localpart_template: '{{ user.login }}'
      display_name_template: '{{ user.name }}'
```

### Google

1. Set up a project in the Google API Console
2. Obtain the OAuth 2.0 credentials (see <https://developers.google.com/identity/protocols/oauth2/openid-connect>)
3. Add this Authorized redirect URI: `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
  enabled: true
  issuer: "https://accounts.google.com/"
  discover: true
  client_id: "your-client-id" # TO BE FILLED
  client_secret: "your-client-secret" # TO BE FILLED
  scopes:
    - openid
    - profile
  user_mapping_provider:
    config:
      localpart_template: '{{ user.given_name|lower }}'
      display_name_template: '{{ user.name }}'
```

### Twitch

1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
  enabled: true
  issuer: "https://id.twitch.tv/oauth2/"
  discover: true
  client_id: "your-client-id" # TO BE FILLED
  client_secret: "your-client-secret" # TO BE FILLED
  client_auth_method: "client_secret_post"
  scopes:
    - openid
  user_mapping_provider:
    config:
      localpart_template: '{{ user.preferred_username }}'
      display_name_template: '{{ user.name }}'
```
@@ -18,9 +18,13 @@ To make Synapse (and therefore Riot) use it:
   metadata:
     local: ["samling.xml"]
   ```
5. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`:
   ```yaml
   public_baseurl: http://localhost:8080/
   ```
6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
   the dependencies are installed and ready to go.
6. Restart Synapse.
7. Restart Synapse.

Then in Riot:

@@ -29,14 +29,13 @@ from synapse.logging import context # omitted from future snippets
def handle_request(request_id):
    request_context = context.LoggingContext()

    calling_context = context.LoggingContext.current_context()
    context.LoggingContext.set_current_context(request_context)
    calling_context = context.set_current_context(request_context)
    try:
        request_context.request = request_id
        do_request_handling()
        logger.debug("finished")
    finally:
        context.LoggingContext.set_current_context(calling_context)
        context.set_current_context(calling_context)

def do_request_handling():
    logger.debug("phew") # this will be logged against request_id

@@ -60,6 +60,31 @@

1. Restart Prometheus.

## Monitoring workers

To monitor a Synapse installation using
[workers](https://github.com/matrix-org/synapse/blob/master/docs/workers.md),
every worker needs to be monitored independently, in addition to
the main homeserver process. This is because workers don't send
their metrics to the main homeserver process, but expose them
directly (if they are configured to do so).

To allow collecting metrics from a worker, you need to add a
`metrics` listener to its configuration, by adding the following
under `worker_listeners`:

```yaml
 - type: metrics
   bind_address: ''
   port: 9101
```

The `bind_address` and `port` parameters should be set so that
the resulting listener can be reached by Prometheus, and they
don't clash with an existing worker.
With this example, the worker's metrics would then be available
on `http://127.0.0.1:9101`.
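
To check that such a listener is up before pointing Prometheus at it, a quick
probe from Python (a sketch; the port matches the example above, and the
`/metrics` path is an assumption -- adjust it if your listener exposes metrics
elsewhere):

```python
import urllib.request

# Probe the worker metrics listener from the example above and print the
# first few lines of Prometheus text format it serves.
with urllib.request.urlopen("http://127.0.0.1:9101/metrics") as resp:
    for line in resp.read().decode("utf-8").splitlines()[:5]:
        print(line)
```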

## Renaming of metrics & deprecation of old names in 1.2

Synapse 1.2 updates the Prometheus metrics to match the naming

@@ -9,7 +9,11 @@ into Synapse, and provides a number of methods by which it can integrate
with the authentication system.

This document serves as a reference for those looking to implement their
own password auth providers.
own password auth providers. Additionally, here is a list of known
password auth provider module implementations:

* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/)
* [matrix-synapse-shared-secret-auth](https://github.com/devture/matrix-synapse-shared-secret-auth)

## Required methods


@@ -61,7 +61,33 @@ Note that the PostgreSQL database *must* have the correct encoding set

You may need to enable password authentication so `synapse_user` can
connect to the database. See
<https://www.postgresql.org/docs/11/auth-pg-hba-conf.html>.
<https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.

If you get an error along the lines of `FATAL: Ident authentication failed for
user "synapse_user"`, you may need to use an authentication method other than
`ident`:

* If the `synapse_user` user has a password, add the password to the `database:`
  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:

  ```
  host    synapse     synapse_user    ::1/128     md5  # or `scram-sha-256` instead of `md5` if you use that
  ```

* If the `synapse_user` user does not have a password, then a password doesn't
  have to be added to `homeserver.yaml`. But the following does need to be added
  to `pg_hba.conf`:

  ```
  host    synapse     synapse_user    ::1/128     trust
  ```

Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
new line, it is inserted before:

```
host    all         all             ::1/128     ident
```

### Fixing incorrect `COLLATE` or `CTYPE`

@@ -72,8 +98,7 @@ underneath the database, or if a different version of the locale is used on any
replicas.

The safest way to fix the issue is to take a dump and recreate the database with
the correct `COLLATE` and `CTYPE` parameters (as per
[docs/postgres.md](docs/postgres.md)). It is also possible to change the
the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database,
however extreme care must be taken to avoid database corruption.

@@ -105,19 +130,41 @@ of free memory the database host has available.
When you are ready to start using PostgreSQL, edit the `database`
section in your config file to match the following lines:

    database:
      name: psycopg2
      args:
        user: <user>
        password: <pass>
        database: <db>
        host: <host>
        cp_min: 5
        cp_max: 10
```yaml
database:
  name: psycopg2
  args:
    user: <user>
    password: <pass>
    database: <db>
    host: <host>
    cp_min: 5
    cp_max: 10
```

All keys and values in `args` are passed to the `psycopg2.connect(..)`
function, except keys beginning with `cp_`, which are consumed by the
twisted adbapi connection pool.
twisted adbapi connection pool. See the [libpq
documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
for a list of options which can be passed.

You should consider tuning the `args.keepalives_*` options if there is any danger of
the connection between your homeserver and database dropping, otherwise Synapse
may block for an extended period while it waits for a response from the
database server. Example values might be:

```yaml
# seconds of inactivity after which TCP should send a keepalive message to the server
keepalives_idle: 10

# the number of seconds after which a TCP keepalive message that is not
# acknowledged by the server should be retransmitted
keepalives_interval: 10

# the number of TCP keepalives that can be lost before the client's connection
# to the server is considered dead
keepalives_count: 3
```

## Porting from SQLite


@@ -42,6 +42,9 @@ the reverse proxy and the homeserver.

    location /_matrix {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        # Nginx by default only allows file uploads up to 1M in size
        # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
        client_max_body_size 10M;
    }
}


@@ -1,77 +0,0 @@
# SAML Mapping Providers

A SAML mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SAML response object to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to a SSO service.

As an example, a SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SAML mapping providers come into play.

## Enabling Providers

External mapping providers are provided to Synapse in the form of an external
Python module. Retrieve this module from [PyPi](https://pypi.org) or elsewhere,
then tell Synapse where to look for the handler class by editing the
`saml2_config.user_mapping_provider.module` config option.

`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.

## Building a Custom Mapping Provider

A custom mapping provider must specify the following methods:

* `__init__(self, parsed_config)`
  - Arguments:
    - `parsed_config` - A configuration object that is the return value of the
      `parse_config` method. You should set any configuration options needed by
      the module here.
* `saml_response_to_user_attributes(self, saml_response, failures)`
  - Arguments:
    - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
      information from.
    - `failures` - An `int` that represents the amount of times the returned
      mxid localpart mapping has failed. This should be used
      to create a deduplicated mxid localpart which should be
      returned instead. For example, if this method returns
      `john.doe` as the value of `mxid_localpart` in the returned
      dict, and that is already taken on the homeserver, this
      method will be called again with the same parameters but
      with failures=1. The method should then return a different
      `mxid_localpart` value, such as `john.doe1`.
  - This method must return a dictionary, which will then be used by Synapse
    to build a new user. The following keys are allowed:
    * `mxid_localpart` - Required. The mxid localpart of the new user.
    * `displayname` - The displayname of the new user. If not provided, will default to
      the value of `mxid_localpart`.
* `parse_config(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - A `dict` representing the parsed content of the
      `saml2_config.user_mapping_provider.config` homeserver config option.
      Runs on homeserver startup. Providers should extract any option values
      they need here.
  - Whatever is returned will be passed back to the user mapping provider module's
    `__init__` method during construction.
* `get_saml_attributes(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - A object resulting from a call to `parse_config`.
  - Returns a tuple of two sets. The first set equates to the saml auth
    response attributes that are required for the module to function, whereas
    the second set consists of those attributes which can be used if available,
    but are not necessary.

## Synapse's Default Provider

Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
@@ -33,10 +33,15 @@ server_name: "SERVERNAME"
#
pid_file: DATADIR/homeserver.pid

# The path to the web client which will be served at /_matrix/client/
# if 'webclient' is configured under the 'listeners' configuration.
# The absolute URL to the web client which /_matrix/client will redirect
# to if 'webclient' is configured under the 'listeners' configuration.
#
#web_client_location: "/path/to/web/root"
# This option can be also set to the filesystem path to the web client
# which will be served at /_matrix/client/ if 'webclient' is configured
# under the 'listeners' configuration, however this is a security risk:
# https://github.com/matrix-org/synapse#security-note
#
#web_client_location: https://riot.example.com/

# The public-facing base URL that clients use to access this HS
# (not including _matrix/...). This is the same URL a user would
@@ -248,6 +253,18 @@ listeners:
#  bind_addresses: ['::1', '127.0.0.1']
#  type: manhole

# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
# forward extremities reaches a given threshold, Synapse will send an
# org.matrix.dummy_event event, which will reduce the forward extremities
# in the room.
#
# This setting defines the threshold (i.e. number of forward extremities in the
# room) at which dummy events are sent. The default value is 10.
#
#dummy_events_threshold: 5


## Homeserver blocking ##

@@ -409,6 +426,16 @@ retention:
#   longest_max_lifetime: 1y
#   interval: 1d

# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
# Note that for some endpoints the error situation is the e-mail already being
# used, and for others the error is entering the e-mail being unused.
# If this option is enabled, instead of returning an error, these endpoints will
# act as if no error happened and return a fake session ID ('sid') to clients.
#
#request_token_inhibit_3pid_errors: true


## TLS ##

@@ -576,20 +603,88 @@ acme:



## Database ##
## Caching ##

database:
  # The database engine name
  name: "sqlite3"
  # Arguments to pass to the engine
  args:
    # Path to the database
    database: "DATADIR/homeserver.db"
# Caching can be configured through the following options.
#
# A cache 'factor' is a multiplier that can be applied to each of
# Synapse's caches in order to increase or decrease the maximum
# number of entries that can be stored.

# Number of events to cache in memory.
# The number of events to cache in memory. Not affected by
# caches.global_factor.
#
#event_cache_size: 10K

caches:
  # Controls the global cache factor, which is the default cache factor
  # for all caches if a specific factor for that cache is not otherwise
  # set.
  #
  # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
  # variable. Setting by environment variable takes priority over
  # setting through the config file.
  #
  # Defaults to 0.5, which will halve the size of all caches.
  #
  #global_factor: 1.0

  # A dictionary of cache name to cache factor for that individual
  # cache. Overrides the global cache factor for a given cache.
  #
  # These can also be set through environment variables comprised
  # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
  # letters and underscores. Setting by environment variable
  # takes priority over setting through the config file.
  # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
  #
  per_cache_factors:
    #get_users_who_share_room_with_user: 2.0


## Database ##

# The 'database' setting defines the database that synapse uses to store all of
# its data.
#
# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or
# 'psycopg2' (for PostgreSQL).
#
# 'args' gives options which are passed through to the database engine,
# except for options starting 'cp_', which are used to configure the Twisted
# connection pool. For a reference to valid arguments, see:
#   * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
#   * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
#   * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__
#
#
# Example SQLite configuration:
#
#database:
#  name: sqlite3
#  args:
#    database: /path/to/homeserver.db
#
#
# Example Postgres configuration:
#
#database:
#  name: psycopg2
#  args:
#    user: synapse
#    password: secretpassword
#    database: synapse
#    host: localhost
#    cp_min: 5
#    cp_max: 10
#
# For more information on using Synapse with Postgres, see `docs/postgres.md`.
#
database:
  name: sqlite3
  args:
    database: DATADIR/homeserver.db


## Logging ##

@@ -697,12 +792,11 @@ media_store_path: "DATADIR/media_store"
#
#media_storage_providers:
#  - module: file_system
#    # Whether to write new local files.
#    # Whether to store newly uploaded local files
#    store_local: false
#    # Whether to write new remote media
#    # Whether to store newly downloaded remote files
#    store_remote: false
#    # Whether to block upload requests waiting for write to this
#    # provider to complete
#    # Whether to wait for successful storage for local uploads
#    store_synchronous: false
#    config:
#       directory: /mnt/some/other/directory
@@ -821,6 +915,31 @@ media_store_path: "DATADIR/media_store"
#
#max_spider_size: 10M

# A list of values for the Accept-Language HTTP header used when
# downloading webpages during URL preview generation. This allows
# Synapse to specify the preferred languages that URL previews should
# be in when communicating with remote servers.
#
# Each value is a IETF language tag; a 2-3 letter identifier for a
# language, optionally followed by subtags separated by '-', specifying
# a country or region variant.
#
# Multiple values can be provided, and a weight can be added to each by
# using quality value syntax (;q=). '*' translates to any language.
#
# Defaults to "en".
#
# Example:
#
# url_preview_accept_language:
#   - en-UK
#   - en-US;q=0.9
#   - fr;q=0.8
#   - *;q=0.7
#
url_preview_accept_language:
#   - en


## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.

@@ -839,10 +958,6 @@ media_store_path: "DATADIR/media_store"
#
#enable_registration_captcha: false

# A secret key used to bypass the captcha test entirely.
#
#captcha_bypass_secret: "YOUR_SECRET_HERE"

# The API endpoint to use for verifying m.login.recaptcha responses.
#
#recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
@@ -1057,6 +1172,29 @@ account_threepid_delegates:
#email: https://example.com     # Delegate email sending to example.com
#msisdn: http://localhost:8090  # Delegate SMS sending to this local process

# Whether users are allowed to change their displayname after it has
# been initially set. Useful when provisioning users based on the
# contents of a third-party directory.
#
# Does not apply to server administrators. Defaults to 'true'
#
#enable_set_displayname: false

# Whether users are allowed to change their avatar after it has been
# initially set. Useful when provisioning users based on the contents
# of a third-party directory.
#
# Does not apply to server administrators. Defaults to 'true'
#
#enable_set_avatar_url: false

# Whether users can change the 3PIDs associated with their accounts
# (email address and msisdn).
#
# Defaults to 'true'
#
#enable_3pid_changes: false

# Users who register on this homeserver will automatically be joined
# to these rooms
#
@@ -1092,7 +1230,7 @@ account_threepid_delegates:
# enabled by default, either for performance reasons or limited use.
#
metrics_flags:
  # Publish synapse_federation_known_servers, a g auge of the number of
  # Publish synapse_federation_known_servers, a gauge of the number of
  # servers this homeserver knows about, including itself. May cause
  # performance problems on large homeservers.
  #
@@ -1258,32 +1396,32 @@ saml2_config:
#    remote:
#      - url: https://our_idp/metadata.xml
#
#    # By default, the user has to go to our login page first. If you'd like
#    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
#    # 'service.sp' section:
#    #
#    #service:
#    #  sp:
#    #    allow_unsolicited: true
#    # By default, the user has to go to our login page first. If you'd like
#    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
#    # 'service.sp' section:
#    #
#    #service:
#    #  sp:
#    #    allow_unsolicited: true
#
#    # The examples below are just used to generate our metadata xml, and you
#    # may well not need them, depending on your setup. Alternatively you
#    # may need a whole lot more detail - see the pysaml2 docs!
#    # The examples below are just used to generate our metadata xml, and you
#    # may well not need them, depending on your setup. Alternatively you
#    # may need a whole lot more detail - see the pysaml2 docs!
#
#    description: ["My awesome SP", "en"]
#    name: ["Test SP", "en"]
#    description: ["My awesome SP", "en"]
#    name: ["Test SP", "en"]
#
#    organization:
#      name: Example com
#      display_name:
#        - ["Example co", "en"]
#      url: "http://example.com"
#    organization:
#      name: Example com
#      display_name:
#        - ["Example co", "en"]
#      url: "http://example.com"
#
#    contact_person:
#      - given_name: Bob
#        sur_name: "the Sysadmin"
#        email_address": ["admin@example.com"]
#        contact_type": technical
#    contact_person:
#      - given_name: Bob
#        sur_name: "the Sysadmin"
#        email_address": ["admin@example.com"]
#        contact_type": technical

# Instead of putting the config inline as above, you can specify a
# separate pysaml2 configuration file:
@@ -1347,6 +1485,113 @@ saml2_config:
#
#grandfathered_mxid_source_attribute: upn

# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page to display to users if something goes wrong during the
#   authentication process: 'saml_error.html'.
#
#   This template doesn't currently need any variable to render.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"


# Enable OpenID Connect for registration and login. Uses authlib.
#
oidc_config:
  # enable OpenID Connect. Defaults to false.
  #
  #enabled: true

  # use the OIDC discovery mechanism to discover endpoints. Defaults to true.
  #
  #discover: true

  # the OIDC issuer. Used to validate tokens and discover the provider's endpoints. Required.
  #
  #issuer: "https://accounts.example.com/"

  # oauth2 client id to use. Required.
  #
  #client_id: "provided-by-your-issuer"

  # oauth2 client secret to use. Required.
  #
  #client_secret: "provided-by-your-issuer"

  # auth method to use when exchanging the token.
  # Valid values are "client_secret_basic" (default), "client_secret_post" and "none".
  #
  #client_auth_method: "client_secret_basic"

  # list of scopes to ask. This should include the "openid" scope. Defaults to ["openid"].
  #
  #scopes: ["openid"]

  # the oauth2 authorization endpoint. Required if provider discovery is disabled.
  #
  #authorization_endpoint: "https://accounts.example.com/oauth2/auth"

  # the oauth2 token endpoint. Required if provider discovery is disabled.
  #
  #token_endpoint: "https://accounts.example.com/oauth2/token"

  # the OIDC userinfo endpoint. Required if discovery is disabled and the "openid" scope is not asked.
  #
  #userinfo_endpoint: "https://accounts.example.com/userinfo"

  # URI where to fetch the JWKS. Required if discovery is disabled and the "openid" scope is used.
  #
  #jwks_uri: "https://accounts.example.com/.well-known/jwks.json"

  # skip metadata verification. Defaults to false.
  # Use this if you are connecting to a provider that is not OpenID Connect compliant.
  # Avoid this in production.
  #
  #skip_verification: false


  # An external module can be provided here as a custom solution to mapping
  # attributes returned from a OIDC provider onto a matrix user.
  #
  user_mapping_provider:
    # The custom module's class. Uncomment to use a custom module.
    # Default is 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
    #
    #module: mapping_provider.OidcMappingProvider

    # Custom configuration values for the module. Below options are intended
    # for the built-in provider, they should be changed if using a custom
    # module. This section will be passed as a Python dictionary to the
    # module's `parse_config` method.
    #
    # Below is the config of the default mapping provider, based on Jinja2
    # templates. Those templates are used to render user attributes, where the
    # userinfo object is available through the `user` variable.
    #
    config:
      # name of the claim containing a unique identifier for the user.
      # Defaults to `sub`, which OpenID Connect compliant providers should provide.
      #
      #subject_claim: "sub"

      # Jinja2 template for the localpart of the MXID
      #
      localpart_template: "{{ user.preferred_username }}"

      # Jinja2 template for the display name to set on first login. Optional.
      #
      #display_name_template: "{{ user.given_name }} {{ user.last_name }}"


# Enable CAS for registration and login.
@@ -1373,6 +1618,10 @@ sso:
#      phishing attacks from evil.site. To avoid this, include a slash after the
#      hostname: "https://my.client/".
#
#      If public_baseurl is set, then the login fallback page (used by clients
#      that don't natively support the required login flows) is whitelisted in
#      addition to any URLs in this list.
#
#      By default, this list is empty.
#
#client_whitelist:
@@ -1404,6 +1653,37 @@ sso:
#
#   * server_name: the homeserver's name.
#
#   * HTML page which notifies the user that they are authenticating to confirm
#     an operation on their account during the user interactive authentication
#     process: 'sso_auth_confirm.html'.
#
#     When rendering, this template is given the following variables:
#       * redirect_url: the URL the user is about to be redirected to. Needs
#         manual escaping (see
#         https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
#       * description: the operation which the user is being asked to confirm
#
#   * HTML page shown after a successful user interactive authentication session:
#     'sso_auth_success.html'.
#
#     Note that this page must include the JavaScript which notifies of a successful authentication
#     (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
#
#     This template has no additional variables.
#
#   * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
#     attempts to login: 'sso_account_deactivated.html'.
#
#     This template has no additional variables.
#
#   * HTML page to display to users if something goes wrong during the
#     OpenID Connect authentication process: 'sso_error.html'.
#
#     When rendering, this template is given two variables:
#       * error: the technical name of the error
#       * error_description: a human-readable message for the error
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
@@ -1434,6 +1714,41 @@ password_config:
   #
   #pepper: "EVEN_MORE_SECRET"

   # Define and enforce a password policy. Each parameter is optional.
   # This is an implementation of MSC2000.
   #
   policy:
      # Whether to enforce the password policy.
      # Defaults to 'false'.
      #
      #enabled: true

      # Minimum accepted length for a password.
      # Defaults to 0.
      #
      #minimum_length: 15

      # Whether a password must contain at least one digit.
      # Defaults to 'false'.
      #
      #require_digit: true

      # Whether a password must contain at least one symbol.
      # A symbol is any character that's not a number or a letter.
      # Defaults to 'false'.
      #
      #require_symbol: true

      # Whether a password must contain at least one lowercase letter.
      # Defaults to 'false'.
      #
      #require_lowercase: true

      # Whether a password must contain at least one uppercase letter.
      # Defaults to 'false'.
      #
      #require_uppercase: true


# Configuration for sending emails from Synapse.
#
@@ -1459,10 +1774,6 @@ email:
   #
   #require_transport_security: true

   # Enable sending emails for messages that the user has missed
   #
   #enable_notifs: false

   # notif_from defines the "From" address to use when sending emails.
   # It must be set if email sending is enabled.
   #
@@ -1480,6 +1791,11 @@ email:
   #
   #app_name: my_branded_matrix_server

   # Uncomment the following to enable sending emails for messages that the user
   # has missed. Disabled by default.
   #
   #enable_notifs: true

   # Uncomment the following to disable automatic subscription to email
   # notifications for new users. Enabled by default.
   #
@@ -1541,7 +1857,19 @@ email:
   #template_dir: "res/templates"


#password_providers:
# Password providers allow homeserver administrators to integrate
# their Synapse installation with existing authentication methods
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
# https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
# respectively.
#
password_providers:
#    # Example config for an LDAP auth provider
#    - module: "ldap_auth_provider.LdapAuthProvider"
#      config:
#        enabled: true
@@ -1574,10 +1902,17 @@ email:
#        include_content: true


#spam_checker:
#  module: "my_custom_project.SuperSpamChecker"
#  config:
#    example_option: 'things'
# Spam checkers are third-party modules that can block specific actions
# of local users, such as creating rooms and registering undesirable
# usernames, as well as remote users by redacting incoming events.
#
spam_checker:
   #- module: "my_custom_project.SuperSpamChecker"
   #  config:
   #    example_option: 'things'
   #- module: "some_other_project.BadEventStopper"
   #  config:
   #    example_stop_events_from: ['@bad:example.com']


# Uncomment to allow non-server-admin users to create groups on this server

@@ -64,10 +64,12 @@ class ExampleSpamChecker:
Modify the `spam_checker` section of your `homeserver.yaml` in the following
manner:

`module` should point to the fully qualified Python class that implements your
custom logic, e.g. `my_module.ExampleSpamChecker`.
Create a list entry with the keys `module` and `config`.

`config` is a dictionary that gets passed to the spam checker class.
* `module` should point to the fully qualified Python class that implements your
  custom logic, e.g. `my_module.ExampleSpamChecker`.

* `config` is a dictionary that gets passed to the spam checker class.

### Example

@@ -75,12 +77,15 @@ This section might look like:

```yaml
spam_checker:
  module: my_module.ExampleSpamChecker
  config:
    # Enable or disable a specific option in ExampleSpamChecker.
    my_custom_option: true
  - module: my_module.ExampleSpamChecker
    config:
      # Enable or disable a specific option in ExampleSpamChecker.
      my_custom_option: true
```

More spam checkers can be added in tandem by appending more items to the list. An
action is blocked when at least one of the configured spam checkers flags it.
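
For orientation, a minimal sketch of such a module (the `check_event_for_spam`
callback is one of the spam checker hooks; the keyword filter and the
`blocked_words` config option are invented for illustration):

```python
# A minimal sketch of a spam checker module following the shape of
# ExampleSpamChecker above. The event is treated as a plain dict here.
class MySpamChecker:
    def __init__(self, config):
        # `config` is the dictionary from the spam_checker list entry.
        self._blocked_words = config.get("blocked_words", [])

    def check_event_for_spam(self, event) -> bool:
        """Return True to block the event."""
        body = event.get("content", {}).get("body", "")
        return any(word in body for word in self._blocked_words)
```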

## Examples

The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full fledged

146 docs/sso_mapping_providers.md Normal file
@@ -0,0 +1,146 @@
# SSO Mapping Providers

A mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SSO response to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to a SSO service.

As an example, a SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SSO mapping providers come into play.

SSO mapping providers are currently supported for OpenID and SAML SSO
configurations. Please see the details below for how to implement your own.

External mapping providers are provided to Synapse in the form of an external
Python module. You can retrieve this module from [PyPi](https://pypi.org) or elsewhere,
but it must be importable via Synapse (e.g. it must be in the same virtualenv
as Synapse). The Synapse config is then modified to point to the mapping provider
(and optionally provide additional configuration for it).

## OpenID Mapping Providers

The OpenID mapping provider can be customized by editing the
`oidc_config.user_mapping_provider.module` config option.

`oidc_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.

### Building a Custom OpenID Mapping Provider

A custom mapping provider must specify the following methods (a sketch of a
complete provider follows this list):

* `__init__(self, parsed_config)`
  - Arguments:
    - `parsed_config` - A configuration object that is the return value of the
      `parse_config` method. You should set any configuration options needed by
      the module here.
* `parse_config(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - A `dict` representing the parsed content of the
      `oidc_config.user_mapping_provider.config` homeserver config option.
      Runs on homeserver startup. Providers should extract and validate
      any option values they need here.
  - Whatever is returned will be passed back to the user mapping provider module's
    `__init__` method during construction.
* `get_remote_user_id(self, userinfo)`
  - Arguments:
    - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
      information from.
  - This method must return a string, which is the unique identifier for the
    user. Commonly the ``sub`` claim of the response.
* `map_user_attributes(self, userinfo, token)`
  - This method should be async.
  - Arguments:
    - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
      information from.
    - `token` - A dictionary which includes information necessary to make
      further requests to the OpenID provider.
  - Returns a dictionary with two keys:
    - `localpart`: A required string, used to generate the Matrix ID.
    - `displayname`: An optional string, the display name for the user.
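
Putting those methods together, a minimal sketch of a custom provider might
look like this (the claim names `preferred_username` and `name` are
illustrative only):

```python
# A minimal sketch of a custom OpenID mapping provider implementing the
# methods listed above.
class MyOidcMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # Extract and validate options from
        # oidc_config.user_mapping_provider.config here.
        return config

    def get_remote_user_id(self, userinfo):
        # Unique, stable identifier for the user at the provider.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token):
        return {
            "localpart": userinfo["preferred_username"],
            "displayname": userinfo.get("name"),
        }
```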

### Default OpenID Mapping Provider

Synapse has a built-in OpenID mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py).

## SAML Mapping Providers

The SAML mapping provider can be customized by editing the
`saml2_config.user_mapping_provider.module` config option.

`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.

### Building a Custom SAML Mapping Provider

A custom mapping provider must specify the following methods (a sketch of a
complete provider follows this list):

* `__init__(self, parsed_config)`
  - Arguments:
    - `parsed_config` - A configuration object that is the return value of the
      `parse_config` method. You should set any configuration options needed by
      the module here.
* `parse_config(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - A `dict` representing the parsed content of the
      `saml_config.user_mapping_provider.config` homeserver config option.
      Runs on homeserver startup. Providers should extract and validate
      any option values they need here.
  - Whatever is returned will be passed back to the user mapping provider module's
    `__init__` method during construction.
* `get_saml_attributes(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - An object resulting from a call to `parse_config`.
  - Returns a tuple of two sets. The first set equates to the SAML auth
    response attributes that are required for the module to function, whereas
    the second set consists of those attributes which can be used if available,
    but are not necessary.
* `get_remote_user_id(self, saml_response, client_redirect_url)`
  - Arguments:
    - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
      information from.
    - `client_redirect_url` - A string, the URL that the client will be
      redirected to.
  - This method must return a string, which is the unique identifier for the
    user. Commonly the ``uid`` claim of the response.
* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
  - Arguments:
    - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
      information from.
    - `failures` - An `int` that represents the number of times the returned
      mxid localpart mapping has failed. This should be used
      to create a deduplicated mxid localpart which should be
      returned instead. For example, if this method returns
      `john.doe` as the value of `mxid_localpart` in the returned
      dict, and that is already taken on the homeserver, this
      method will be called again with the same parameters but
      with failures=1. The method should then return a different
      `mxid_localpart` value, such as `john.doe1`.
    - `client_redirect_url` - A string, the URL that the client will be
      redirected to.
  - This method must return a dictionary, which will then be used by Synapse
    to build a new user. The following keys are allowed:
    * `mxid_localpart` - Required. The mxid localpart of the new user.
    * `displayname` - The displayname of the new user. If not provided, will default to
      the value of `mxid_localpart`.
|
||||
|
||||
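For illustration, a minimal hypothetical provider implementing the interface above. It assumes the standard pysaml2 `ava` attribute map on the response; the attribute names `uid` and `displayName` are examples only.

```python
class ExampleSamlMappingProvider:
    """Hypothetical SAML mapping provider; attribute names are illustrative."""

    def __init__(self, parsed_config):
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # Extract and validate options at startup; this sketch passes the
        # raw dict straight through.
        return config

    @staticmethod
    def get_saml_attributes(config):
        # (required, optional) SAML auth response attributes.
        return {"uid"}, {"displayName"}

    def get_remote_user_id(self, saml_response, client_redirect_url):
        # `ava` is pysaml2's attribute-value map on the AuthnResponse.
        return saml_response.ava["uid"][0]

    def saml_response_to_user_attributes(
        self, saml_response, failures, client_redirect_url
    ):
        base = saml_response.ava["uid"][0].lower()
        # Deduplicate on retry, e.g. john.doe -> john.doe1 when failures=1.
        localpart = base + (str(failures) if failures else "")
        displayname = saml_response.ava.get("displayName", [None])[0]
        return {"mxid_localpart": localpart, "displayname": displayname}
```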
### Default SAML Mapping Provider

Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
67 docs/systemd-with-workers/README.md Normal file
@@ -0,0 +1,67 @@
# Setting up Synapse with Workers and Systemd

This is a setup for managing synapse with systemd, including support for
managing workers. It provides a `matrix-synapse` service for the master, as
well as a `matrix-synapse-worker@` service template for any workers you
require. Additionally, to group the required services, it sets up a
`matrix-synapse.target`.

See the folder [system](system) for the systemd unit files.

The folder [workers](workers) contains an example configuration for the
`federation_reader` worker.

## Synapse configuration files

See [workers.md](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly. You can find an example worker
config in the [workers](workers) folder.

Systemd manages daemonization itself, so ensure that none of the configuration
files set either `daemonize` or `worker_daemonize`.

The config files of all workers are expected to be located in
`/etc/matrix-synapse/workers`. If you want to use a different location, edit
the provided `*.service` files accordingly.

There is no need for a separate configuration file for the master process.

## Set up

1. Adjust synapse configuration files as above.
1. Copy the `*.service` and `*.target` files in [system](system) to
   `/etc/systemd/system`.
1. Run `systemctl daemon-reload` to tell systemd to load the new unit files.
1. Run `systemctl enable matrix-synapse.service`. This will configure the
   synapse master process to be started as part of the `matrix-synapse.target`
   target.
1. For each worker process to be enabled, run `systemctl enable
   matrix-synapse-worker@<worker_name>.service`. For each `<worker_name>`, there
   should be a corresponding configuration file
   `/etc/matrix-synapse/workers/<worker_name>.yaml`.
1. Start all the synapse processes with `systemctl start matrix-synapse.target`.
1. Tell systemd to start synapse on boot with `systemctl enable matrix-synapse.target`.

## Usage

Once the services are correctly set up, you can use the following commands
to manage your synapse installation:

```sh
# Restart Synapse master and all workers
systemctl restart matrix-synapse.target

# Stop Synapse and all workers
systemctl stop matrix-synapse.target

# Restart the master alone
systemctl restart matrix-synapse.service

# Restart a specific worker (eg. federation_reader); the master is
# unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service

# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```
20 docs/systemd-with-workers/system/matrix-synapse-worker@.service Normal file
@@ -0,0 +1,20 @@
[Unit]
Description=Synapse %i

# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target

[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse-%i

[Install]
WantedBy=matrix-synapse.target
@@ -1,5 +1,8 @@
[Unit]
Description=Synapse Matrix Homeserver
Description=Synapse master

# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target

[Service]
Type=notify
@@ -15,4 +18,4 @@ RestartSec=3
SyslogIdentifier=matrix-synapse

[Install]
WantedBy=matrix.target
WantedBy=matrix-synapse.target
6 docs/systemd-with-workers/system/matrix-synapse.target Normal file
@@ -0,0 +1,6 @@
[Unit]
Description=Synapse parent target
After=network.target

[Install]
WantedBy=multi-user.target
@@ -10,5 +10,4 @@ worker_listeners:
    resources:
      - names: [federation]

worker_daemonize: false
worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
@@ -14,16 +14,18 @@ example flow would be (where '>' indicates master to worker and
'<' worker to master flows):

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]
    < REPLICATE
    > POSITION events master 53
    > RDATA events master 54 ["$foo1:bar.com", ...]
    > RDATA events master 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its
identity with the `SERVER` command, followed by the client asking to
subscribe to the `events` stream from the token `53`. The server then
periodically sends `RDATA` commands which have the format
`RDATA <stream_name> <token> <row>`, where the format of `<row>` is
defined by the individual streams.
The example shows the server accepting a new connection and sending its identity
with the `SERVER` command, followed by the client sending a `REPLICATE` command,
to which the server responds with the current position of all streams. The
server then periodically sends `RDATA` commands which have the format
`RDATA <stream_name> <instance_name> <token> <row>`, where the format of
`<row>` is defined by the individual streams. The `<instance_name>` is the name
of the Synapse process that generated the data (usually "master").

Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
@@ -32,9 +34,6 @@ Since the protocol is simple and line-based, it's possible to manually
connect to the server using a tool like netcat. A few things should be
noted when manually using the protocol:

- When subscribing to a stream using `REPLICATE`, the special token
  `NOW` can be used to get all future updates. The special stream name
  `ALL` can be used with `NOW` to subscribe to all available streams.
- The federation stream is only available if federation sending has
  been disabled on the main process.
- The server will only time connections out that have sent a `PING`
@@ -55,7 +54,7 @@ The basic structure of the protocol is line based, where the initial
word of each line specifies the command. The rest of the line is parsed
based on the command. For example, the RDATA command is defined as:

    RDATA <stream_name> <token> <row_json>
    RDATA <stream_name> <instance_name> <token> <row_json>

(Note that `<row_json>` may contain spaces, but cannot contain
newlines.)
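As a sketch of the framing described above (not Synapse's actual parser), assuming the new five-field form:

```python
import json

def parse_rdata(line: str):
    """Split an RDATA line into (stream_name, instance_name, token, row)."""
    command, stream_name, instance_name, token, row_json = line.split(" ", 4)
    assert command == "RDATA"
    # <row_json> may contain spaces, so only split off the first four fields.
    return stream_name, instance_name, int(token), json.loads(row_json)

# Example using a line from the flow above:
print(parse_rdata('RDATA events master 54 ["$foo1:bar.com"]'))
# -> ('events', 'master', 54, ['$foo1:bar.com'])
```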
@@ -91,9 +90,7 @@ The client:
- Sends a `NAME` command, allowing the server to associate a human
  friendly name with the connection. This is optional.
- Sends a `PING` as above
- For each stream the client wishes to subscribe to it sends a
  `REPLICATE` with the `stream_name` and token it wants to subscribe
  from.
- Sends a `REPLICATE` to get the current position of all streams.
- On receipt of a `SERVER` command, checks that the server name
  matches the expected server name.

@@ -140,14 +137,12 @@ the wire:
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
    < REPLICATE
    > POSITION events master 1
    > POSITION backfill master 1
    > POSITION caches master 1
    > RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events master 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
@@ -158,10 +153,10 @@ position without needing to send data with the `RDATA` command.

An example of a batched set of `RDATA` is:

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches master 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it
sees the last `RDATA` (a sketch of this follows).
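A minimal sketch, not Synapse's implementation, of the buffering behaviour this implies: rows carrying the literal token `batch` are queued, and the stream position advances only when a row with a real token arrives.

```python
import json

class StreamBuffer:
    """Buffers batched RDATA rows until the terminating token arrives."""

    def __init__(self):
        self.pending = []        # rows awaiting their token
        self.current_token = 0

    def on_rdata(self, token: str, row_json: str):
        self.pending.append(json.loads(row_json))
        if token == "batch":
            return []            # more rows in this batch are coming
        rows, self.pending = self.pending, []
        self.current_token = int(token)  # only now advance the position
        return rows

buf = StreamBuffer()
buf.on_rdata("batch", '["row1"]')      # -> [] (buffered)
print(buf.on_rdata("54", '["row2"]'))  # -> [['row1'], ['row2']]; token is now 54
```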
@@ -181,9 +176,14 @@ client (C):

#### POSITION (S)

The position of the stream has been updated. Sent to the client
after all missing updates for a stream have been sent to the client
and they're now up to date.
On receipt of a POSITION command clients should check if they have missed any
updates, and if so then fetch them out of band. Sent in response to a
REPLICATE command (but can happen at any time).

The POSITION command includes the source of the stream. Currently all streams
are written by a single process (usually "master"). If fetching missing
updates via the HTTP API, rather than via the DB, then processes should make the
request to the appropriate process.
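A hypothetical sketch of that client-side behaviour; `fetch_updates` and `process_row` are illustrative callables, not Synapse APIs.

```python
class ReplicationClientSketch:
    """Hypothetical client-side POSITION handling (illustrative names only)."""

    def __init__(self, fetch_updates, process_row):
        self.current_token = {}          # stream_name -> last token seen
        self.fetch_updates = fetch_updates
        self.process_row = process_row

    async def on_position(self, stream_name, instance_name, token):
        old_token = self.current_token.get(stream_name, 0)
        if token > old_token:
            # We may have missed rows (e.g. while reconnecting): fetch them out
            # of band from the process that writes this stream.
            updates = await self.fetch_updates(
                instance_name, stream_name, old_token, token
            )
            for row_token, row in updates:
                self.process_row(stream_name, row_token, row)
        self.current_token[stream_name] = token
```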
#### ERROR (S, C)

@@ -199,24 +199,17 @@ client (C):

#### REPLICATE (C)

Asks the server to replicate a given stream. The syntax is:

```
REPLICATE <stream_name> <token>
```

Where `<token>` may be either:
 * a numeric stream_id to stream updates since (exclusive)
 * `NOW` to stream all subsequent updates.

The `<stream_name>` is the name of a replication stream to subscribe
to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
of streams). It can also be `ALL` to subscribe to all known streams,
in which case the `<token>` must be set to `NOW`.
Asks the server for the current position of all streams.

#### USER_SYNC (C)

A user has started or stopped syncing
A user has started or stopped syncing on this process.

#### CLEAR_USER_SYNC (C)

The server should clear all associated user sync data from the worker.

This is used when a worker is shutting down.

#### FEDERATION_ACK (C)

@@ -226,14 +219,6 @@ in which case the `<token>` must be set to `NOW`.

Inform the server a pusher should be removed

#### INVALIDATE_CACHE (C)

Inform the server a cache should be invalidated

#### SYNC (S, C)

Used exclusively in tests

#### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.

@@ -252,12 +237,12 @@ Each individual cache invalidation results in a row being sent down
replication, which includes the cache name (the name of the function)
and the key to invalidate. For example:

    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
    > RDATA caches master 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

Alternatively, an entire cache can be invalidated by sending down a `null`
instead of the key. For example:

    > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252]
    > RDATA caches master 550953772 ["get_user_by_id", null, 1550574873252]

However, there are times when a number of caches need to be invalidated
at the same time with the same key. To reduce traffic we batch those
@@ -11,6 +11,13 @@ TURN server.

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.

## Requirements

For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.

Hosting TURN behind a NAT (even with appropriate port forwarding) is known to cause issues
and to often not work.

## `coturn` Setup

### Initial installation
@@ -113,7 +120,7 @@ Your home server configuration file needs the following extra keys:

As an example, here is the relevant section of the config file for matrix.org:

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
    turn_user_lifetime: 86400000
    turn_allow_guests: True
208 docs/workers.md
@@ -1,23 +1,31 @@
# Scaling synapse via workers

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
For small instances it is recommended to run Synapse in monolith mode (the
default). For larger instances where performance is a concern it can be helpful
to split out functionality into multiple separate python processes. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All of the below is highly experimental and subject to change as Synapse evolves,
but documenting it here to help folks needing highly scalable Synapses similar
to the one running matrix.org!
Synapse's worker support is under active development and subject to change as
we attempt to rapidly scale ever larger Synapse instances. However we are
documenting it here to help admins needing a highly scalable Synapse instance
similar to the one running `matrix.org`.

All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).
All processes continue to share the same database instance, and as such,
workers only work with PostgreSQL-based Synapse deployments. SQLite should only
be used for demo purposes and any admin considering workers should already be
running PostgreSQL.

The workers communicate with the master synapse process via a synapse-specific
TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.
## Master/worker communication

The workers communicate with the master process via a Synapse-specific protocol
called 'replication' (analogous to MySQL- or Postgres-style database
replication) which feeds a stream of relevant data from the master to the
workers so they can be kept in sync with the master process and database state.

Additionally, workers may make HTTP requests to the master, to send information
in the other direction. Typically this is used for operations which need to
wait for a reply - such as sending an event.

## Configuration
@@ -27,72 +35,61 @@ the correct worker, or to the main synapse instance. Note that this includes
requests made to the federation port. See [reverse_proxy.md](reverse_proxy.md)
for information on setting up a reverse proxy.

To enable workers, you need to add two replication listeners to the master
synapse, e.g.:
To enable workers, you need to add *two* replication listeners to the
main Synapse configuration file (`homeserver.yaml`). For example:

    listeners:
      # The TCP replication port
      - port: 9092
        bind_address: '127.0.0.1'
        type: replication
      # The HTTP replication port
      - port: 9093
        bind_address: '127.0.0.1'
        type: http
        resources:
          - names: [replication]

```yaml
listeners:
  # The TCP replication port
  - port: 9092
    bind_address: '127.0.0.1'
    type: replication

  # The HTTP replication port
  - port: 9093
    bind_address: '127.0.0.1'
    type: http
    resources:
      - names: [replication]
```

Under **no circumstances** should these replication API listeners be exposed to
the public internet; it currently implements no authentication whatsoever and is
unencrypted.
the public internet; they have no authentication and are unencrypted.

(Roughly, the TCP port is used for streaming data from the master to the
workers, and the HTTP port for the workers to send data to the main
synapse process.)
You should then create a set of configs for the various worker processes. Each
worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that
worker, e.g. the HTTP listener that it provides (if any); logging
configuration; etc. You should minimise the number of overrides though to
maintain a usable config.

You then create a set of configs for the various worker processes. These
should be worker configuration files, and should be stored in a dedicated
subdirectory, to allow synctl to manipulate them. An additional configuration
for the master synapse process will need to be created because the process will
not be started automatically. That configuration should look like this:
In the config file for each worker, you must specify the type of worker
application (`worker_app`). The currently available worker applications are
listed below. You must also specify the replication endpoints that it should
talk to on the main synapse process. `worker_replication_host` should specify
the host of the main synapse, `worker_replication_port` should point to the TCP
replication listener port and `worker_replication_http_port` should point to
the HTTP replication port.

    worker_app: synapse.app.homeserver
    daemonize: true
For example:

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.
```yaml
worker_app: synapse.app.synchrotron

You must specify the type of worker application (`worker_app`). The currently
available worker applications are listed below. You must also specify the
replication endpoints that it's talking to on the main synapse process.
`worker_replication_host` should specify the host of the main synapse,
`worker_replication_port` should point to the TCP replication listener port and
`worker_replication_http_port` should point to the HTTP replication port.
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093

Currently, the `event_creator` and `federation_reader` workers require specifying
`worker_replication_http_port`.
worker_listeners:
 - type: http
   port: 8083
   resources:
     - names:
       - client

For instance:

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092
    worker_replication_http_port: 9093

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
```

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP `/sync` endpoint on port 8083 separately from the `/sync` endpoint provided
@@ -101,7 +98,75 @@ by the main synapse.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).

Finally, to actually run your worker-based synapse, you must pass synctl the -a
Finally, you need to start your worker processes. This can be done with either
`synctl` or your distribution's preferred service manager such as `systemd`. We
recommend the use of `systemd` where available: for information on setting up
`systemd` to start synapse workers, see
[systemd-with-workers](systemd-with-workers). To use `synctl`, see below.

### **Experimental** support for replication over redis

As of Synapse v1.13.0, it is possible to configure Synapse to send replication
via a [Redis pub/sub channel](https://redis.io/topics/pubsub). This is an
alternative to direct TCP connections to the master: rather than all the
workers connecting to the master, all the workers and the master connect to
Redis, which relays replication commands between processes. This can give a
significant CPU saving on the master and will be a prerequisite for upcoming
performance improvements.

Note that this support is currently experimental; you may experience lost
messages and similar problems! It is strongly recommended that admins setting
up workers for the first time use direct TCP replication as above.

To configure Synapse to use Redis:

1. Install Redis following the normal procedure for your distribution - for
   example, on Debian, `apt install redis-server`. (It is safe to use an
   existing Redis deployment if you have one: we use a pub/sub stream named
   according to the `server_name` of your synapse server.)
2. Check Redis is running and accessible: you should be able to `echo PING | nc -q1
   localhost 6379` and get a response of `+PONG`.
3. Install the python prerequisites. If you installed synapse into a
   virtualenv, this can be done with:
   ```sh
   pip install matrix-synapse[redis]
   ```
   The debian packages from matrix.org already include the required
   dependencies.
4. Add config to the shared configuration (`homeserver.yaml`):
   ```yaml
   redis:
     enabled: true
   ```
   Optional parameters which can go alongside `enabled` are `host`, `port`,
   `password`. Normally none of these are required.
5. Restart master and all workers.

Once redis replication is in use, `worker_replication_port` is redundant and
can be removed from the worker configuration files. Similarly, the
configuration for the `listener` for the TCP replication port can be removed
from the main configuration file. Note that the HTTP replication port is
still required.

### Using synctl

If you want to use `synctl` to manage your synapse processes, you will need to
create an additional configuration file for the master synapse process. That
configuration should look like this:

```yaml
worker_app: synapse.app.homeserver
```

Additionally, each worker app must be configured with the name of a "pid file",
to which it will write its process ID when it starts. For example, for a
synchrotron, you might write:

```yaml
worker_pid_file: /home/matrix/synapse/synchrotron.pid
```

Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.:
@@ -268,11 +333,14 @@ Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/account_data/
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/rooms/[^/]*/account_data/

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:

    ^/_matrix/client/(r0|unstable)/register$
    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$

Pagination requests can also be handled, but all requests for the same room
must be routed to the same instance. Additionally, care must be taken to
3 mypy.ini
@@ -75,3 +75,6 @@ ignore_missing_imports = True

[mypy-jwt.*]
ignore_missing_imports = True

[mypy-authlib.*]
ignore_missing_imports = True
@@ -103,7 +103,7 @@ def main():

        yaml.safe_dump(result, sys.stdout, default_flow_style=False)

        rows = list(row for server, json in result.items() for row in rows_v2(server, json))
        rows = [row for server, json in result.items() for row in rows_v2(server, json)]

        cursor = connection.cursor()
        cursor.executemany(
@@ -122,7 +122,7 @@ APPEND_ONLY_TABLES = [
    "presence_stream",
    "push_rules_stream",
    "ex_outlier_stream",
    "cache_invalidation_stream",
    "cache_invalidation_stream_by_instance",
    "public_room_list_stream",
    "state_group_edges",
    "stream_ordering_to_exterm",
@@ -188,7 +188,7 @@ class MockHomeserver:
        self.clock = Clock(reactor)
        self.config = config
        self.hostname = config.server_name
        self.version_string = "Synapse/"+get_version_string(synapse)
        self.version_string = "Synapse/" + get_version_string(synapse)

    def get_clock(self):
        return self.clock
@@ -1,20 +1,31 @@
name: matrix-synapse
base: core18
version: git
version: git
summary: Reference Matrix homeserver
description: |
  Synapse is the reference Matrix homeserver.
  Matrix is a federated and decentralised instant messaging and VoIP system.

grade: stable
confinement: strict
grade: stable
confinement: strict

apps:
  matrix-synapse:
  matrix-synapse:
    command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
    stop-command: synctl -c $SNAP_COMMON stop
    plugs: [network-bind, network]
    daemon: simple
    daemon: simple
  hash-password:
    command: hash_password
  generate-config:
    command: generate_config
  generate-signing-key:
    command: generate_signing_key.py
  register-new-matrix-user:
    command: register_new_matrix_user
    plugs: [network]
  synctl:
    command: synctl
parts:
  matrix-synapse:
    source: .
@@ -22,6 +33,10 @@ parts:
    python-version: python3
    python-packages:
      - '.[all]'
      - pip
      - setuptools
      - setuptools-scm
      - wheel
    build-packages:
      - libffi-dev
      - libturbojpeg0-dev
13 stubs/sortedcontainers/__init__.pyi Normal file
@@ -0,0 +1,13 @@
from .sorteddict import (
    SortedDict,
    SortedKeysView,
    SortedItemsView,
    SortedValuesView,
)

__all__ = [
    "SortedDict",
    "SortedKeysView",
    "SortedItemsView",
    "SortedValuesView",
]
124 stubs/sortedcontainers/sorteddict.pyi Normal file
@@ -0,0 +1,124 @@
# stub for SortedDict. This is a lightly edited copy of
# https://github.com/grantjenks/python-sortedcontainers/blob/eea42df1f7bad2792e8da77335ff888f04b9e5ae/sortedcontainers/sorteddict.pyi
# (from https://github.com/grantjenks/python-sortedcontainers/pull/107)

from typing import (
    Any,
    Callable,
    Dict,
    Hashable,
    Iterator,
    Iterable,
    ItemsView,
    KeysView,
    List,
    Mapping,
    Optional,
    Sequence,
    Type,
    TypeVar,
    Tuple,
    Union,
    ValuesView,
    overload,
)

_T = TypeVar("_T")
_S = TypeVar("_S")
_T_h = TypeVar("_T_h", bound=Hashable)
_KT = TypeVar("_KT", bound=Hashable)  # Key type.
_VT = TypeVar("_VT")  # Value type.
_KT_co = TypeVar("_KT_co", covariant=True, bound=Hashable)
_VT_co = TypeVar("_VT_co", covariant=True)
_SD = TypeVar("_SD", bound=SortedDict)
_Key = Callable[[_T], Any]

class SortedDict(Dict[_KT, _VT]):
    @overload
    def __init__(self, **kwargs: _VT) -> None: ...
    @overload
    def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def __init__(
        self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    @overload
    def __init__(self, __key: _Key[_KT], **kwargs: _VT) -> None: ...
    @overload
    def __init__(
        self, __key: _Key[_KT], __map: Mapping[_KT, _VT], **kwargs: _VT
    ) -> None: ...
    @overload
    def __init__(
        self, __key: _Key[_KT], __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    @property
    def key(self) -> Optional[_Key[_KT]]: ...
    @property
    def iloc(self) -> SortedKeysView[_KT]: ...
    def clear(self) -> None: ...
    def __delitem__(self, key: _KT) -> None: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __reversed__(self) -> Iterator[_KT]: ...
    def __setitem__(self, key: _KT, value: _VT) -> None: ...
    def _setitem(self, key: _KT, value: _VT) -> None: ...
    def copy(self: _SD) -> _SD: ...
    def __copy__(self: _SD) -> _SD: ...
    @classmethod
    @overload
    def fromkeys(cls, seq: Iterable[_T_h]) -> SortedDict[_T_h, None]: ...
    @classmethod
    @overload
    def fromkeys(cls, seq: Iterable[_T_h], value: _S) -> SortedDict[_T_h, _S]: ...
    def keys(self) -> SortedKeysView[_KT]: ...
    def items(self) -> SortedItemsView[_KT, _VT]: ...
    def values(self) -> SortedValuesView[_VT]: ...
    @overload
    def pop(self, key: _KT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _T = ...) -> Union[_VT, _T]: ...
    def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
    @overload
    def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
    @overload
    def update(self, **kwargs: _VT) -> None: ...
    def __reduce__(
        self,
    ) -> Tuple[
        Type[SortedDict[_KT, _VT]], Tuple[Callable[[_KT], Any], List[Tuple[_KT, _VT]]],
    ]: ...
    def __repr__(self) -> str: ...
    def _check(self) -> None: ...
    def islice(
        self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool,
    ) -> Iterator[_KT]: ...
    def bisect_left(self, value: _KT) -> int: ...
    def bisect_right(self, value: _KT) -> int: ...

class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]):
    @overload
    def __getitem__(self, index: int) -> _KT_co: ...
    @overload
    def __getitem__(self, index: slice) -> List[_KT_co]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...

class SortedItemsView(  # type: ignore
    ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]
):
    def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ...
    @overload
    def __getitem__(self, index: int) -> Tuple[_KT_co, _VT_co]: ...
    @overload
    def __getitem__(self, index: slice) -> List[Tuple[_KT_co, _VT_co]]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...

class SortedValuesView(ValuesView[_VT_co], Sequence[_VT_co]):
    @overload
    def __getitem__(self, index: int) -> _VT_co: ...
    @overload
    def __getitem__(self, index: slice) -> List[_VT_co]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...
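To make the stubbed API above concrete, a small usage example of the real `sortedcontainers` package (assuming it is installed):

```python
from sortedcontainers import SortedDict

sd = SortedDict({"b": 2, "a": 1, "c": 3})
print(list(sd.keys()))      # ['a', 'b', 'c'] -- keys iterate in sorted order
print(sd.peekitem(0))       # ('a', 1) -- index into the sorted order
print(sd.bisect_left("b"))  # 1 -- binary search over the sorted keys
```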
43 stubs/txredisapi.pyi Normal file
@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains *incomplete* type hints for txredisapi.
"""

from typing import List, Optional, Union

class RedisProtocol:
    def publish(self, channel: str, message: bytes): ...

class SubscriberProtocol:
    password: Optional[str]
    def subscribe(self, channels: Union[str, List[str]]): ...
    def connectionMade(self): ...
    def connectionLost(self, reason): ...

def lazyConnection(
    host: str = ...,
    port: int = ...,
    dbid: Optional[int] = ...,
    reconnect: bool = ...,
    charset: str = ...,
    password: Optional[str] = ...,
    connectTimeout: Optional[int] = ...,
    replyTimeout: Optional[int] = ...,
    convertNumbers: bool = ...,
) -> RedisProtocol: ...

class SubscriberFactory:
    def buildProtocol(self, addr): ...
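A minimal sketch of exercising the two stubbed calls above (`lazyConnection` and `publish`) from Twisted code; the channel name is illustrative.

```python
from twisted.internet import defer, reactor

import txredisapi

@defer.inlineCallbacks
def publish_example():
    # lazyConnection returns immediately; commands are queued until the
    # underlying connection is established.
    redis = txredisapi.lazyConnection(host="localhost", port=6379)
    yield redis.publish("example-channel", b"hello")  # illustrative channel
    reactor.stop()

reactor.callWhenRunning(publish_example)
reactor.run()
```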
@@ -36,7 +36,7 @@ try:
except ImportError:
    pass

__version__ = "1.11.1"
__version__ = "1.13.0rc2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@@ -26,19 +26,18 @@ from twisted.internet import defer
import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, LimitBlockingTypes, Membership, UserTypes
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    ResourceLimitError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.config.server import is_threepid_reserved
from synapse.events import EventBase
from synapse.types import StateMap, UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches import register_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure

@@ -74,10 +73,14 @@ class Auth(object):
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        self.token_cache = LruCache(10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._auth_blocking = AuthBlocking(self.hs)

        self._account_validity = hs.config.account_validity
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key

    @defer.inlineCallbacks
    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
@@ -191,7 +194,7 @@ class Auth(object):
            opentracing.set_tag("authenticated_entity", user_id)
            opentracing.set_tag("appservice_id", app_service.id)

            if ip_addr and self.hs.config.track_appservice_user_ips:
            if ip_addr and self._track_appservice_user_ips:
                yield self.store.insert_client_ip(
                    user_id=user_id,
                    access_token=access_token,
@@ -454,7 +457,7 @@ class Auth(object):
        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))

        v.verify(macaroon, self.hs.config.macaroon_secret_key)
        v.verify(macaroon, self._macaroon_secret_key)

    def _verify_expiry(self, caveat):
        prefix = "time < "
@@ -537,27 +540,26 @@ class Auth(object):

        return defer.succeed(auth_ids)

    @defer.inlineCallbacks
    def check_can_change_room_list(self, room_id, user):
        """Check if the user is allowed to edit the room's entry in the
    async def check_can_change_room_list(self, room_id: str, user: UserID):
        """Determine whether the user is allowed to edit the room's entry in the
        published room list.

        Args:
            room_id (str)
            user (UserID)
            room_id
            user
        """

        is_admin = yield self.is_server_admin(user)
        is_admin = await self.is_server_admin(user)
        if is_admin:
            return True

        user_id = user.to_string()
        yield self.check_user_in_room(room_id, user_id)
        await self.check_user_in_room(room_id, user_id)

        # We currently require the user is a "moderator" in the room. We do this
        # by checking if they would (theoretically) be able to change the
        # m.room.aliases events
        power_level_event = yield self.state.get_current_state(
        # m.room.canonical_alias events
        power_level_event = await self.state.get_current_state(
            room_id, EventTypes.PowerLevels, ""
        )

@@ -566,16 +568,11 @@ class Auth(object):
        auth_events[(EventTypes.PowerLevels, "")] = power_level_event

        send_level = event_auth.get_send_level(
            EventTypes.Aliases, "", power_level_event
            EventTypes.CanonicalAlias, "", power_level_event
        )
        user_level = event_auth.get_user_power_level(user_id, auth_events)

        if user_level < send_level:
            raise AuthError(
                403,
                "This server requires you to be a moderator in the room to"
                " edit its room list entry",
            )
        return user_level >= send_level

    @staticmethod
    def has_access_token(request):
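Note the changed contract here: the method now returns a bool instead of raising `AuthError` itself, so callers are expected to check the result. A hypothetical caller:

```python
from synapse.api.errors import AuthError

async def require_can_change_room_list(auth, room_id, user):
    # Hypothetical call site reflecting the new return-a-bool contract.
    if not await auth.check_can_change_room_list(room_id, user):
        raise AuthError(403, "You do not have permission to edit the room list entry")
```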
@@ -669,71 +666,5 @@ class Auth(object):
            % (user_id, room_id),
        )

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort

            threepid(dict|None): If present, checks for presence against configured
                reserved threepid. Used in cases where the user is trying to register
                with a MAU blocked server; normally they would be rejected but their
                threepid is on the reserved list. user_id and
                threepid should never be set at the same time.

            user_type(str|None): If present, is used to decide whether to check against
                certain blocking reasons like MAU.
        """

        # Never fail an auth check for the server notices users or support user
        # This can be a problem where event creation is prohibited due to blocking
        if user_id is not None:
            if user_id == self.hs.config.server_notices_mxid:
                return
            if (yield self.store.is_support_user(user_id)):
                return

        if self.hs.config.hs_disabled:
            raise ResourceLimitError(
                403,
                self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self.hs.config.admin_contact,
                limit_type=LimitBlockingTypes.HS_DISABLED,
            )
        if self.hs.config.limit_usage_by_mau is True:
            assert not (user_id and threepid)

            # If the user is already part of the MAU cohort or a trial user
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return

                is_trial = yield self.store.is_trial_user(user_id)
                if is_trial:
                    return
            elif threepid:
                # If the user does not exist yet, but is signing up with a
                # reserved threepid then pass auth check
                if is_threepid_reserved(
                    self.hs.config.mau_limits_reserved_threepids, threepid
                ):
                    return
            elif user_type == UserTypes.SUPPORT:
                # If the user does not exist yet and is of type "support",
                # allow registration. Support users are excluded from MAU checks.
                return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self.hs.config.max_mau_value:
                raise ResourceLimitError(
                    403,
                    "Monthly Active User Limit Exceeded",
                    admin_contact=self.hs.config.admin_contact,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                    limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
                )
    def check_auth_blocking(self, *args, **kwargs):
        return self._auth_blocking.check_auth_blocking(*args, **kwargs)
104 synapse/api/auth_blocking.py Normal file
@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.constants import LimitBlockingTypes, UserTypes
from synapse.api.errors import Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved

logger = logging.getLogger(__name__)


class AuthBlocking(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()

        self._server_notices_mxid = hs.config.server_notices_mxid
        self._hs_disabled = hs.config.hs_disabled
        self._hs_disabled_message = hs.config.hs_disabled_message
        self._admin_contact = hs.config.admin_contact
        self._max_mau_value = hs.config.max_mau_value
        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
        self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort

            threepid(dict|None): If present, checks for presence against configured
                reserved threepid. Used in cases where the user is trying to register
                with a MAU blocked server; normally they would be rejected but their
                threepid is on the reserved list. user_id and
                threepid should never be set at the same time.

            user_type(str|None): If present, is used to decide whether to check against
                certain blocking reasons like MAU.
        """

        # Never fail an auth check for the server notices users or support user
        # This can be a problem where event creation is prohibited due to blocking
        if user_id is not None:
            if user_id == self._server_notices_mxid:
                return
            if (yield self.store.is_support_user(user_id)):
                return

        if self._hs_disabled:
            raise ResourceLimitError(
                403,
                self._hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self._admin_contact,
                limit_type=LimitBlockingTypes.HS_DISABLED,
            )
        if self._limit_usage_by_mau is True:
            assert not (user_id and threepid)

            # If the user is already part of the MAU cohort or a trial user
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return

                is_trial = yield self.store.is_trial_user(user_id)
                if is_trial:
                    return
            elif threepid:
                # If the user does not exist yet, but is signing up with a
                # reserved threepid then pass auth check
                if is_threepid_reserved(self._mau_limits_reserved_threepids, threepid):
                    return
            elif user_type == UserTypes.SUPPORT:
                # If the user does not exist yet and is of type "support",
                # allow registration. Support users are excluded from MAU checks.
                return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self._max_mau_value:
                raise ResourceLimitError(
                    403,
                    "Monthly Active User Limit Exceeded",
                    admin_contact=self._admin_contact,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                    limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
                )
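A hypothetical call site for the extracted check; as the code above shows, it raises `ResourceLimitError` rather than returning a value:

```python
import logging

from twisted.internet import defer

from synapse.api.errors import ResourceLimitError

logger = logging.getLogger(__name__)

@defer.inlineCallbacks
def register_if_allowed(auth, user_id):
    """Hypothetical call site; `auth` is the Auth object wired up above."""
    try:
        # Raises ResourceLimitError if the server is disabled or MAU-capped;
        # it simply returns (None) when the user is allowed through.
        yield auth.check_auth_blocking(user_id=user_id)
    except ResourceLimitError as e:
        logger.warning("Blocked %s: %s", user_id, e.msg)
        raise
```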
@@ -61,6 +61,7 @@ class LoginType(object):
    MSISDN = "m.login.msisdn"
    RECAPTCHA = "m.login.recaptcha"
    TERMS = "m.login.terms"
    SSO = "org.matrix.login.sso"
    DUMMY = "m.login.dummy"

    # Only for C/S API v1
@@ -96,6 +97,8 @@ class EventTypes(object):

    Retention = "m.room.retention"

    Presence = "m.presence"


class RejectedReason(object):
    AUTH_ERROR = "auth_error"
@@ -64,8 +64,16 @@ class Codes(object):
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
    WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
    EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
    PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
    PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
    PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
    PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
    PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
    PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
    WEAK_PASSWORD = "M_WEAK_PASSWORD"
    INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
    USER_DEACTIVATED = "M_USER_DEACTIVATED"
    BAD_ALIAS = "M_BAD_ALIAS"


class CodeMessageException(RuntimeError):
@@ -78,7 +86,14 @@ class CodeMessageException(RuntimeError):

    def __init__(self, code, msg):
        super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
        self.code = code

        # Some calls to this method pass instances of http.HTTPStatus for `code`.
        # While HTTPStatus is a subclass of int, it has magic __str__ methods
        # which emit `HTTPStatus.FORBIDDEN` when converted to a str, instead of `403`.
        # This causes inconsistency in our log lines.
        #
        # To eliminate this behaviour, we convert them to their integer equivalents here.
        self.code = int(code)
        self.msg = msg
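The standard-library behaviour that comment describes can be seen directly (on Python 3.10 and earlier; 3.11 changed `IntEnum.__str__`):

```python
from http import HTTPStatus

code = HTTPStatus.FORBIDDEN
print("%s" % code)  # 'HTTPStatus.FORBIDDEN' on Python <= 3.10
print("%d" % code)  # '403' -- %d forces integer formatting
print(int(code))    # 403  -- what the fix above stores in self.code
```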
@@ -438,6 +453,20 @@ class IncompatibleRoomVersionError(SynapseError):
        return cs_error(self.msg, self.errcode, room_version=self._room_version)


class PasswordRefusedError(SynapseError):
    """A password has been refused, either during password reset/change or registration.
    """

    def __init__(
        self,
        msg="This password doesn't comply with the server's policy",
        errcode=Codes.WEAK_PASSWORD,
    ):
        super(PasswordRefusedError, self).__init__(
            code=400, msg=msg, errcode=errcode,
        )


class RequestSendFailed(RuntimeError):
    """Sending a HTTP request over federation failed due to not being able to
    talk to the remote server for some reason.
@@ -57,7 +57,7 @@ class RoomVersion(object):
    state_res = attr.ib()  # int; one of the StateResolutionVersions
    enforce_key_validity = attr.ib()  # bool

    # bool: before MSC2260, anyone was allowed to send an aliases event
    # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
    special_case_aliases_auth = attr.ib(type=bool, default=False)


@@ -102,12 +102,13 @@ class RoomVersions(object):
        enforce_key_validity=True,
        special_case_aliases_auth=True,
    )
    MSC2260_DEV = RoomVersion(
        "org.matrix.msc2260",
    MSC2432_DEV = RoomVersion(
        "org.matrix.msc2432",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
    )

@@ -119,6 +120,6 @@ KNOWN_ROOM_VERSIONS = {
        RoomVersions.V3,
        RoomVersions.V4,
        RoomVersions.V5,
        RoomVersions.MSC2260_DEV,
        RoomVersions.MSC2432_DEV,
    )
}  # type: Dict[str, RoomVersion]
@@ -22,6 +22,7 @@ import sys
import traceback

from daemonize import Daemonize
from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
@@ -139,9 +140,9 @@ def start_reactor(
        run()


def quit_with_error(error_string):
def quit_with_error(error_string: str) -> NoReturn:
    message_lines = error_string.split("\n")
    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
    line_length = max(len(line) for line in message_lines if len(line) < 80) + 2
    sys.stderr.write("*" * line_length + "\n")
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
@@ -270,15 +271,25 @@ def start(hs, listeners=None):

        # Start the tracer
        synapse.logging.opentracing.init_tracer(  # type: ignore[attr-defined] # noqa
            hs.config
            hs
        )

        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().db.start_profiling()
        hs.get_pusherpool().start()

        setup_sentry(hs)
        setup_sdnotify(hs)

        # We now freeze all allocated objects in the hopes that (almost)
        # everything currently allocated are things that will be used for the
        # rest of time. Doing so means less work each GC (hopefully).
        #
        # This only works on Python 3.7
        if sys.version_info >= (3, 7):
            gc.collect()
            gc.freeze()
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
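`gc.freeze()` is a real CPython 3.7+ API: it moves all currently tracked objects into a permanent generation that the collector skips. A standalone illustration:

```python
import gc
import sys

if sys.version_info >= (3, 7):
    gc.collect()                  # collect garbage first so it isn't frozen
    gc.freeze()                   # move survivors to the permanent generation
    print(gc.get_freeze_count())  # number of objects now exempt from GC
```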
@@ -306,7 +317,7 @@ def setup_sentry(hs):
        scope.set_tag("matrix_server_name", hs.config.server_name)

        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.config.worker_name if hs.config.worker_name else "master"
        name = hs.get_instance_name()
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)
@@ -43,7 +43,6 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
@@ -79,17 +78,6 @@ class AdminCmdServer(HomeServer):
    def start_listening(self, listeners):
        pass

    def build_tcp_replication(self):
        return AdminCmdReplicationHandler(self)


class AdminCmdReplicationHandler(ReplicationClientHandler):
    async def on_rdata(self, stream_name, token, rows):
        pass

    def get_streams_to_replicate(self):
        return {}


@defer.inlineCallbacks
def export_data_command(hs, args):

synapse/app/appservice.py

@@ -13,161 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.appservice")


class AppserviceSlaveStore(
    DirectoryStore,
    SlavedEventStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
):
    pass


class AppserviceServer(HomeServer):
    DATASTORE_CLASS = AppserviceSlaveStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse appservice now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ASReplicationHandler(self)


class ASReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    async def on_rdata(self, stream_name, token, rows):
        await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()
            run_in_background(self._notify_app_services, max_stream_id)

    @defer.inlineCallbacks
    def _notify_app_services(self, room_stream_id):
        try:
            yield self.appservice_handler.notify_interested_services(room_stream_id)
        except Exception:
            logger.exception("Error notifying application services of event")


def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse appservice", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.appservice"

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.notify_appservices:
        sys.stderr.write(
            "\nThe appservices must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``notify_appservices: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.notify_appservices = True

    ps = AppserviceServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ps, config, use_worker_options=True)

    ps.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ps, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-appservice", config)

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):
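
Aside: each single-purpose worker entry point in this compare (appservice,
client_reader, event_creator, federation_reader, federation_sender,
frontend_proxy) is cut down to the same thin shim delegating to the new
synapse/app/generic_worker.py. The rendering above truncates the body of the
with block; based on the surviving lines, the complete shim presumably reads
as follows, where the start(sys.argv[1:]) call is an assumption rather than
something shown in this diff:

import sys

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):
        # Hand the command-line arguments to the shared worker entry point.
        start(sys.argv[1:])  # assumed call; elided in the rendered diff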

synapse/app/client_reader.py

@@ -13,195 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,
    RoomEventContextServlet,
    RoomMemberListRestServlet,
    RoomMessageListRestServlet,
    RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import groups
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedGroupServerStore,
    SlavedAccountDataStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass


class ClientReaderServer(HomeServer):
    DATASTORE_CLASS = ClientReaderSlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)

                    PublicRoomListRestServlet(self).register(resource)
                    RoomMemberListRestServlet(self).register(resource)
                    JoinedRoomMemberListRestServlet(self).register(resource)
                    RoomStateRestServlet(self).register(resource)
                    RoomEventContextServlet(self).register(resource)
                    RoomMessageListRestServlet(self).register(resource)
                    RegisterRestServlet(self).register(resource)
                    LoginRestServlet(self).register(resource)
                    ThreepidRestServlet(self).register(resource)
                    KeyQueryServlet(self).register(resource)
                    KeyChangesServlet(self).register(resource)
                    VoipRestServlet(self).register(resource)
                    PushRuleRestServlet(self).register(resource)
                    VersionsRestServlet(self).register(resource)

                    groups.register_servlets(self, resource)

                    resources.update({"/_matrix/client": resource})

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse client reader", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.client_reader"

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = ClientReaderServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-client-reader", config)

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):

synapse/app/event_creator.py

@@ -13,191 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.profile import (
    ProfileAvatarURLRestServlet,
    ProfileDisplaynameRestServlet,
    ProfileRestServlet,
)
from synapse.rest.client.v1.room import (
    JoinRoomAliasServlet,
    RoomMembershipRestServlet,
    RoomSendEventRestServlet,
    RoomStateEventRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
    UserDirectoryStore,
    DirectoryStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedEventStore,
    SlavedRegistrationStore,
    RoomStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass


class EventCreatorServer(HomeServer):
    DATASTORE_CLASS = EventCreatorSlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    RoomSendEventRestServlet(self).register(resource)
                    RoomMembershipRestServlet(self).register(resource)
                    RoomStateEventRestServlet(self).register(resource)
                    JoinRoomAliasServlet(self).register(resource)
                    ProfileAvatarURLRestServlet(self).register(resource)
                    ProfileDisplaynameRestServlet(self).register(resource)
                    ProfileRestServlet(self).register(resource)
                    resources.update(
                        {
                            "/_matrix/client/r0": resource,
                            "/_matrix/client/unstable": resource,
                            "/_matrix/client/v2_alpha": resource,
                            "/_matrix/client/api/v1": resource,
                        }
                    )

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse event creator now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse event creator", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.event_creator"

    assert config.worker_replication_http_port is not None

    # This should only be done on the user directory worker or the master
    config.update_user_directory = False

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = EventCreatorServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-event-creator", config)

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):

synapse/app/federation_reader.py

@@ -13,177 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedAccountDataStore,
    SlavedProfileStore,
    SlavedApplicationServiceStore,
    SlavedPusherStore,
    SlavedPushRuleStore,
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    SlavedRegistrationStore,
    SlavedGroupServerStore,
    SlavedDeviceStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    DATASTORE_CLASS = FederationReaderSlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "federation":
                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
                if name == "openid" and "federation" not in res["names"]:
                    # Only load the openid resource separately if federation resource
                    # is not specified since federation resource includes openid
                    # resource.
                    resources.update(
                        {
                            FEDERATION_PREFIX: TransportLayerServer(
                                self, servlet_groups=["openid"]
                            )
                        }
                    )

                if name in ["keys", "federation"]:
                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )

        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_reader"

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = FederationReaderServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-federation-reader", config)

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):

synapse/app/federation_sender.py

@@ -13,308 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import (
    DeviceListsStream,
    ReceiptsStream,
    ToDeviceStream,
)
from synapse.server import HomeServer
from synapse.storage.database import Database
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore,
    SlavedTransactionStore,
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedRegistrationStore,
    SlavedDeviceStore,
    SlavedPresenceStore,
):
    def __init__(self, database: Database, db_conn, hs):
        super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1


class FederationSenderServer(HomeServer):
    DATASTORE_CLASS = FederationSenderSlaveStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return FederationSenderReplicationHandler(self)


class FederationSenderReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    async def on_rdata(self, stream_name, token, rows):
        await super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(
            FederationSenderReplicationHandler, self
        ).get_streams_to_replicate()
        args.update(self.send_handler.stream_positions())
        return args

    def on_remote_server_up(self, server: str):
        """Called when get a new REMOTE_SERVER_UP command."""

        # Let's wake up the transaction queue for the server in case we have
        # pending stuff to send to it.
        self.send_handler.wake_destination(server)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.send_federation:
        sys.stderr.write(
            "\nThe send_federation must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.send_federation = True

    ss = FederationSenderServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-federation-sender", config)


class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """

    def __init__(self, hs: FederationSenderServer, replication_client):
        self.store = hs.get_datastore()
        self._is_mine_id = hs.is_mine_id
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def wake_destination(self, server: str):
        self.federation_sender.wake_destination(server)

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            run_in_background(self.update_token, token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

        # ... and when new receipts happen
        elif stream_name == ReceiptsStream.NAME:
            run_as_background_process(
                "process_receipts_for_federation", self._on_new_receipts, rows
            )

        # ... as well as device updates and messages
        elif stream_name == DeviceListsStream.NAME:
            hosts = set(row.destination for row in rows)
            for host in hosts:
                self.federation_sender.send_device_messages(host)

        elif stream_name == ToDeviceStream.NAME:
            # The to_device stream includes stuff to be pushed to both local
            # clients and remote servers, so we ignore entities that start with
            # '@' (since they'll be local users rather than destinations).
            hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
            for host in hosts:
                self.federation_sender.send_device_messages(host)

    @defer.inlineCallbacks
    def _on_new_receipts(self, rows):
        """
        Args:
            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
                new receipts to be processed
        """
        for receipt in rows:
            # we only want to send on receipts for our own users
            if not self._is_mine_id(receipt.user_id):
                continue
            receipt_info = ReadReceipt(
                receipt.room_id,
                receipt.receipt_type,
                receipt.user_id,
                [receipt.event_id],
                receipt.data,
            )
            yield self.federation_sender.send_read_receipt(receipt_info)

    @defer.inlineCallbacks
    def update_token(self, token):
        try:
            self.federation_position = token

            # We linearize here to ensure we don't have races updating the token
            with (yield self._fed_position_linearizer.queue(None)):
                if self._last_ack < self.federation_position:
                    yield self.store.update_federation_out_pos(
                        "federation", self.federation_position
                    )

                    # We ACK this token over replication so that the master can drop
                    # its in memory queues
                    self.replication_client.send_federation_ack(
                        self.federation_position
                    )
                    self._last_ack = self.federation_position
        except Exception:
            logger.exception("Error updating federation stream position")

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):

synapse/app/frontend_proxy.py

@@ -13,241 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.frontend_proxy")


class PresenceStatusStubServlet(RestServlet):
    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super(PresenceStatusStubServlet, self).__init__()
        self.http_client = hs.get_simple_http_client()
        self.auth = hs.get_auth()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
        # Pass through the auth headers, if any, in case the access token
        # is there.
        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
        headers = {"Authorization": auth_headers}

        try:
            result = yield self.http_client.get_json(
                self.main_uri + request.uri.decode("ascii"), headers=headers
            )
        except HttpResponseException as e:
            raise e.to_synapse_error()

        return 200, result

    @defer.inlineCallbacks
    def on_PUT(self, request, user_id):
        yield self.auth.get_user_by_req(request)
        return 200, {}


class KeyUploadServlet(RestServlet):
    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            # Pass through the auth headers, if any, in case the access token
            # is there.
            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
            headers = {"Authorization": auth_headers}
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri.decode("ascii"), body, headers=headers
            )

            return 200, result
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}


class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    DATASTORE_CLASS = FrontendProxySlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)

                    # If presence is disabled, use the stub servlet that does
                    # not allow sending presence
                    if not self.config.use_presence:
                        PresenceStatusStubServlet(self).register(resource)

                    resources.update(
                        {
                            "/_matrix/client/r0": resource,
                            "/_matrix/client/unstable": resource,
                            "/_matrix/client/v2_alpha": resource,
                            "/_matrix/client/api/v1": resource,
                        }
                    )

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse frontend proxy", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = FrontendProxyServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-frontend-proxy", config)

from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):

949  synapse/app/generic_worker.py  Normal file

@@ -0,0 +1,949 @@
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import contextlib
|
||||
import logging
|
||||
import sys
|
||||
from typing import Dict, Iterable
|
||||
|
||||
from typing_extensions import ContextManager
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.web.resource import NoResource
|
||||
|
||||
import synapse
|
||||
import synapse.events
|
||||
from synapse.api.errors import HttpResponseException, SynapseError
|
||||
from synapse.api.urls import (
|
||||
CLIENT_API_PREFIX,
|
||||
FEDERATION_PREFIX,
|
||||
LEGACY_MEDIA_PREFIX,
|
||||
MEDIA_PREFIX,
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
)
|
||||
from synapse.app import _base
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.federation import send_queue
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.handlers.presence import BasePresenceHandler, get_interested_parties
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.logging.context import LoggingContext
|
||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
||||
from synapse.replication.slave.storage.profile import SlavedProfileStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
||||
from synapse.replication.tcp.client import ReplicationDataHandler
|
||||
from synapse.replication.tcp.commands import ClearUserSyncsCommand
|
||||
from synapse.replication.tcp.streams import (
|
||||
AccountDataStream,
|
||||
DeviceListsStream,
|
||||
GroupServerStream,
|
||||
PresenceStream,
|
||||
PushersStream,
|
||||
PushRulesStream,
|
||||
ReceiptsStream,
|
||||
TagAccountDataStream,
|
||||
ToDeviceStream,
|
||||
TypingStream,
|
||||
)
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.client.v1 import events
|
||||
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.v1.login import LoginRestServlet
|
||||
from synapse.rest.client.v1.profile import (
|
||||
ProfileAvatarURLRestServlet,
|
||||
ProfileDisplaynameRestServlet,
|
||||
ProfileRestServlet,
|
||||
)
|
||||
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
|
||||
from synapse.rest.client.v1.room import (
|
||||
JoinedRoomMemberListRestServlet,
|
||||
JoinRoomAliasServlet,
|
||||
PublicRoomListRestServlet,
|
||||
RoomEventContextServlet,
|
||||
RoomInitialSyncRestServlet,
|
||||
RoomMemberListRestServlet,
|
||||
RoomMembershipRestServlet,
|
||||
RoomMessageListRestServlet,
|
||||
RoomSendEventRestServlet,
|
||||
RoomStateEventRestServlet,
|
||||
RoomStateRestServlet,
|
||||
)
|
||||
from synapse.rest.client.v1.voip import VoipRestServlet
|
||||
from synapse.rest.client.v2_alpha import groups, sync, user_directory
|
||||
from synapse.rest.client.v2_alpha._base import client_patterns
|
||||
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
|
||||
from synapse.rest.client.v2_alpha.account_data import (
|
||||
AccountDataServlet,
|
||||
RoomAccountDataServlet,
|
||||
)
|
||||
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
|
||||
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
|
||||
from synapse.rest.client.versions import VersionsRestServlet
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.censor_events import CensorEventsStore
|
||||
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
|
||||
from synapse.storage.data_stores.main.monthly_active_users import (
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
)
|
||||
from synapse.storage.data_stores.main.presence import UserPresenceState
|
||||
from synapse.storage.data_stores.main.search import SearchWorkerStore
|
||||
from synapse.storage.data_stores.main.ui_auth import UIAuthWorkerStore
|
||||
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
|
||||
from synapse.types import ReadReceipt
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
logger = logging.getLogger("synapse.app.generic_worker")
|
||||
|
||||
|
||||
class PresenceStatusStubServlet(RestServlet):
|
||||
"""If presence is disabled this servlet can be used to stub out setting
|
||||
presence status, while proxying the getters to the master instance.
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
|
||||
|
||||
def __init__(self, hs):
|
||||
super(PresenceStatusStubServlet, self).__init__()
|
||||
self.http_client = hs.get_simple_http_client()
|
||||
self.auth = hs.get_auth()
|
||||
self.main_uri = hs.config.worker_main_http_uri
|
||||
|
||||
async def on_GET(self, request, user_id):
|
||||
# Pass through the auth headers, if any, in case the access token
|
||||
# is there.
|
||||
auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
|
||||
headers = {"Authorization": auth_headers}
|
||||
|
||||
try:
|
||||
result = await self.http_client.get_json(
|
||||
self.main_uri + request.uri.decode("ascii"), headers=headers
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
|
||||
return 200, result
|
||||
|
||||
async def on_PUT(self, request, user_id):
|
||||
await self.auth.get_user_by_req(request)
|
||||
return 200, {}
|
||||
|
||||
|
||||
class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read only
    requests, but otherwise proxies through to the master instance.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    async def on_POST(self, request, device_id):
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            # Pass through the auth headers, if any, in case the access token
            # is there.
            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
            headers = {"Authorization": auth_headers}
            result = await self.http_client.post_json_get_json(
                self.main_uri + request.uri.decode("ascii"), body, headers=headers
            )

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}

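The routing decision in on_POST above is worth spelling out: a non-empty body is a real upload, which only the main process may perform, while an empty body is merely a query for one-time-key counts that the worker can answer from its replicated database. A sketch of just that branch, with hypothetical `proxy_to_main` and `count_local_keys` helpers:

async def handle_key_upload(body, proxy_to_main, count_local_keys):
    if body:
        # A real upload: the main process owns all writes.
        return await proxy_to_main(body)
    # No body: just report the current one-time-key counts locally.
    return {"one_time_key_counts": await count_local_keys()}
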
class _NullContextManager(ContextManager[None]):
    """A context manager which does nothing."""

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


UPDATE_SYNCING_USERS_MS = 10 * 1000

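On Python 3.7 and later the standard library offers the same no-op behaviour via contextlib.nullcontext, e.g.:

from contextlib import nullcontext

with nullcontext():
    pass  # enters and exits without doing anything
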
class GenericWorkerPresence(BasePresenceHandler):
    def __init__(self, hs):
        super().__init__(hs)
        self.hs = hs
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()

        self._presence_enabled = hs.config.use_presence

        # The number of ongoing syncs on this process, by user id.
        # Empty if _presence_enabled is false.
        self._user_to_num_current_syncs = {}  # type: Dict[str, int]

        self.notifier = hs.get_notifier()
        self.instance_id = hs.get_instance_id()

        # user_id -> last_sync_ms. Lists the users that have stopped syncing
        # but we haven't notified the master of that yet
        self.users_going_offline = {}

        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            run_as_background_process,
            "generic_presence.on_shutdown",
            self._on_shutdown,
        )

    def _on_shutdown(self):
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_user_sync(
                self.instance_id, user_id, is_syncing, last_sync_ms
            )

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
        had recently stopped syncing.

        Args:
            user_id (str)
        """
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # Safe to skip because we haven't yet told the master they were offline
            self.send_user_sync(user_id, True, self.clock.time_msec())

    def mark_as_going_offline(self, user_id):
        """A user has stopped syncing. We wait before notifying the master as
        it's likely they'll come back soon. This allows us to avoid sending
        a stopped syncing immediately followed by a started syncing notification
        to the master.

        Args:
            user_id (str)
        """
        self.users_going_offline[user_id] = self.clock.time_msec()

    def send_stop_syncing(self):
        """Check if there are any users who have stopped syncing a while ago
        and haven't come back yet. If there are, poke the master about them.
        """
        now = self.clock.time_msec()
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)

    def set_state(self, user, state, ignore_status_msg=False):
        # TODO: how is this supposed to work?
        return defer.succeed(None)

    async def user_syncing(
        self, user_id: str, affect_presence: bool
    ) -> ContextManager[None]:
        """Record that a user is syncing.

        Called by the sync and events servlets to record that a user has connected to
        this worker and is waiting for some events.
        """
        if not affect_presence or not self._presence_enabled:
            return _NullContextManager()

        curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
        self._user_to_num_current_syncs[user_id] = curr_sync + 1

        # If we went from no in flight sync to some, notify replication
        if self._user_to_num_current_syncs[user_id] == 1:
            self.mark_as_coming_online(user_id)

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if user_id in self._user_to_num_current_syncs:
                self._user_to_num_current_syncs[user_id] -= 1

                # If we went from one in flight sync to none, notify replication
                if self._user_to_num_current_syncs[user_id] == 0:
                    self.mark_as_going_offline(user_id)

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        return _user_syncing()

    @defer.inlineCallbacks
    def notify_from_replication(self, states, stream_id):
        parties = yield get_interested_parties(self.store, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key",
            stream_id,
            rooms=room_ids_to_states.keys(),
            users=users_to_states.keys(),
        )

    @defer.inlineCallbacks
    def process_replication_rows(self, token, rows):
        states = [
            UserPresenceState(
                row.user_id,
                row.state,
                row.last_active_ts,
                row.last_federation_update_ts,
                row.last_user_sync_ts,
                row.status_msg,
                row.currently_active,
            )
            for row in rows
        ]

        for state in states:
            self.user_to_current_state[state.user_id] = state

        stream_id = token
        yield self.notify_from_replication(states, stream_id)

    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
        return [
            user_id
            for user_id, count in self._user_to_num_current_syncs.items()
            if count > 0
        ]

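The coming-online/going-offline pair above implements a simple debounce: a user who stops syncing is not reported offline until UPDATE_SYNCING_USERS_MS has elapsed without them returning, which avoids an offline/online flap on every brief reconnect. The core of the idea, reduced to a standalone sketch (illustrative only, not Synapse code):

going_offline = {}  # user_id -> last_sync_ms

def stopped_syncing(user_id, now_ms):
    going_offline[user_id] = now_ms  # defer telling the master

def started_syncing(user_id, now_ms, send_user_sync):
    if going_offline.pop(user_id, None) is None:
        # The master still thinks they are offline, so tell it now.
        send_user_sync(user_id, True, now_ms)

def flush(now_ms, send_user_sync, debounce_ms=10 * 1000):
    for user_id, last_ms in list(going_offline.items()):
        if now_ms - last_ms > debounce_ms:
            going_offline.pop(user_id, None)
            send_user_sync(user_id, False, last_ms)
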
class GenericWorkerTyping(object):
    def __init__(self, hs):
        self._latest_room_serial = 0
        self._reset()

    def _reset(self):
        """
        Reset the typing handler's data caches.
        """
        # map room IDs to serial numbers
        self._room_serials = {}
        # map room IDs to sets of users currently typing
        self._room_typing = {}

    def process_replication_rows(self, token, rows):
        if self._latest_room_serial > token:
            # The master has gone backwards. To prevent inconsistent data, just
            # clear everything.
            self._reset()

        # Set the latest serial token to whatever the server gave us.
        self._latest_room_serial = token

        for row in rows:
            self._room_serials[row.room_id] = token
            self._room_typing[row.room_id] = row.user_ids

    def get_current_token(self) -> int:
        return self._latest_room_serial

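The guard at the top of process_replication_rows matters because the typing stream is not durable: if the master restarts, its serial can go backwards, and per-room serials cached here would then look newer than the stream itself. A tiny illustration of the failure the reset prevents:

latest_serial, room_serials = 5, {"!room:example.org": 5}
incoming_token = 2  # master restarted and handed out a lower serial
if latest_serial > incoming_token:
    room_serials.clear()  # better to drop caches than serve stale data
latest_serial = incoming_token
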
class GenericWorkerSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
    UserDirectoryStore,
    UIAuthWorkerStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedGroupServerStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    CensorEventsStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    SlavedPresenceStore,
    SlavedFilteringStore,
    MonthlyActiveUsersWorkerStore,
    MediaRepositoryStore,
    SearchWorkerStore,
    BaseSlavedStore,
):
    def __init__(self, database, db_conn, hs):
        super(GenericWorkerSlavedStore, self).__init__(database, db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1

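The long base-class list works because Python resolves attributes left to right along the method resolution order, so the more specific slaved stores take precedence over BaseSlavedStore at the tail. A toy example of the same effect:

class Base:
    def get_thing(self):
        return "base"

class Override(Base):
    def get_thing(self):
        return "override"

class Combined(Override, Base):
    pass

assert Combined().get_thing() == "override"  # leftmost match wins
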
class GenericWorkerServer(HomeServer):
    DATASTORE_CLASS = GenericWorkerSlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)

                    PublicRoomListRestServlet(self).register(resource)
                    RoomMemberListRestServlet(self).register(resource)
                    JoinedRoomMemberListRestServlet(self).register(resource)
                    RoomStateRestServlet(self).register(resource)
                    RoomEventContextServlet(self).register(resource)
                    RoomMessageListRestServlet(self).register(resource)
                    RegisterRestServlet(self).register(resource)
                    LoginRestServlet(self).register(resource)
                    ThreepidRestServlet(self).register(resource)
                    KeyQueryServlet(self).register(resource)
                    KeyChangesServlet(self).register(resource)
                    VoipRestServlet(self).register(resource)
                    PushRuleRestServlet(self).register(resource)
                    VersionsRestServlet(self).register(resource)
                    RoomSendEventRestServlet(self).register(resource)
                    RoomMembershipRestServlet(self).register(resource)
                    RoomStateEventRestServlet(self).register(resource)
                    JoinRoomAliasServlet(self).register(resource)
                    ProfileAvatarURLRestServlet(self).register(resource)
                    ProfileDisplaynameRestServlet(self).register(resource)
                    ProfileRestServlet(self).register(resource)
                    KeyUploadServlet(self).register(resource)
                    AccountDataServlet(self).register(resource)
                    RoomAccountDataServlet(self).register(resource)

                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    RoomInitialSyncRestServlet(self).register(resource)

                    user_directory.register_servlets(self, resource)

                    # If presence is disabled, use the stub servlet that does
                    # not allow sending presence
                    if not self.config.use_presence:
                        PresenceStatusStubServlet(self).register(resource)

                    groups.register_servlets(self, resource)

                    resources.update({CLIENT_API_PREFIX: resource})
                elif name == "federation":
                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
                elif name == "media":
                    if self.config.can_load_media_repo:
                        media_repo = self.get_media_repository_resource()

                        # We need to serve the admin servlets for media on the
                        # worker.
                        admin_resource = JsonResource(self, canonical_json=False)
                        register_servlets_for_media_repo(self, admin_resource)

                        resources.update(
                            {
                                MEDIA_PREFIX: media_repo,
                                LEGACY_MEDIA_PREFIX: media_repo,
                                "/_synapse/admin": admin_resource,
                            }
                        )
                    else:
                        logger.warning(
                            "A 'media' listener is configured but the media"
                            " repository is disabled. Ignoring."
                        )

                if name == "openid" and "federation" not in res["names"]:
                    # Only load the openid resource separately if federation resource
                    # is not specified since federation resource includes openid
                    # resource.
                    resources.update(
                        {
                            FEDERATION_PREFIX: TransportLayerServer(
                                self, servlet_groups=["openid"]
                            )
                        }
                    )

                if name in ["keys", "federation"]:
                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

                if name == "replication":
                    resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )

        logger.info("Synapse worker now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

    def build_replication_data_handler(self):
        return GenericWorkerReplicationHandler(self)

    def build_presence_handler(self):
        return GenericWorkerPresence(self)

    def build_typing_handler(self):
        return GenericWorkerTyping(self)

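For reference, a listener_config of the shape _listen_http consumes above might look like the following (values are illustrative; in a deployment they come from the worker's YAML config):

listener = {
    "type": "http",
    "port": 8083,
    "bind_addresses": ["127.0.0.1"],
    "resources": [{"names": ["client", "federation"]}],
}
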
class GenericWorkerReplicationHandler(ReplicationDataHandler):
    def __init__(self, hs):
        super(GenericWorkerReplicationHandler, self).__init__(hs)

        self.store = hs.get_datastore()
        self.typing_handler = hs.get_typing_handler()
        self.presence_handler = hs.get_presence_handler()  # type: GenericWorkerPresence
        self.notifier = hs.get_notifier()

        self.notify_pushers = hs.config.start_pushers
        self.pusher_pool = hs.get_pusherpool()

        if hs.config.send_federation:
            self.send_handler = FederationSenderHandler(hs, self)
        else:
            self.send_handler = None

    async def on_rdata(self, stream_name, instance_name, token, rows):
        await super().on_rdata(stream_name, instance_name, token, rows)
        await self._process_and_notify(stream_name, instance_name, token, rows)

    async def _process_and_notify(self, stream_name, instance_name, token, rows):
        try:
            if self.send_handler:
                await self.send_handler.process_replication_rows(
                    stream_name, token, rows
                )

            if stream_name == PushRulesStream.NAME:
                self.notifier.on_new_event(
                    "push_rules_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME):
                self.notifier.on_new_event(
                    "account_data_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name == ReceiptsStream.NAME:
                self.notifier.on_new_event(
                    "receipt_key", token, rooms=[row.room_id for row in rows]
                )
                await self.pusher_pool.on_new_receipts(
                    token, token, {row.room_id for row in rows}
                )
            elif stream_name == TypingStream.NAME:
                self.typing_handler.process_replication_rows(token, rows)
                self.notifier.on_new_event(
                    "typing_key", token, rooms=[row.room_id for row in rows]
                )
            elif stream_name == ToDeviceStream.NAME:
                entities = [row.entity for row in rows if row.entity.startswith("@")]
                if entities:
                    self.notifier.on_new_event("to_device_key", token, users=entities)
            elif stream_name == DeviceListsStream.NAME:
                all_room_ids = set()
                for row in rows:
                    if row.entity.startswith("@"):
                        room_ids = await self.store.get_rooms_for_user(row.entity)
                        all_room_ids.update(room_ids)
                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
            elif stream_name == PresenceStream.NAME:
                await self.presence_handler.process_replication_rows(token, rows)
            elif stream_name == GroupServerStream.NAME:
                self.notifier.on_new_event(
                    "groups_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name == PushersStream.NAME:
                for row in rows:
                    if row.deleted:
                        self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                    else:
                        await self.start_pusher(row.user_id, row.app_id, row.pushkey)
        except Exception:
            logger.exception("Error processing replication")

    def stop_pusher(self, user_id, app_id, pushkey):
        if not self.notify_pushers:
            return

        key = "%s:%s" % (app_id, pushkey)
        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
        pusher = pushers_for_user.pop(key, None)
        if pusher is None:
            return
        logger.info("Stopping pusher %r / %r", user_id, key)
        pusher.on_stop()

    async def start_pusher(self, user_id, app_id, pushkey):
        if not self.notify_pushers:
            return

        key = "%s:%s" % (app_id, pushkey)
        logger.info("Starting pusher %r / %r", user_id, key)
        return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)

    def on_remote_server_up(self, server: str):
        """Called when we get a new REMOTE_SERVER_UP command."""

        # Let's wake up the transaction queue for the server in case we have
        # pending stuff to send to it.
        if self.send_handler:
            self.send_handler.wake_destination(server)

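_process_and_notify above is, in effect, a stream-name dispatch table written as an if/elif chain. The same shape can be expressed as a mapping, sketched here purely for illustration:

def make_dispatcher(handlers):
    # handlers: dict mapping stream name -> async callable(token, rows)
    async def on_rows(stream_name, token, rows):
        handler = handlers.get(stream_name)
        if handler is not None:
            await handler(token, rows)
    return on_rows
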
class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """

    def __init__(self, hs: GenericWorkerServer, replication_client):
        self.store = hs.get_datastore()
        self._is_mine_id = hs.is_mine_id
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def wake_destination(self, server: str):
        self.federation_sender.wake_destination(server)

    async def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            await self.update_token(token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

        # ... and when new receipts happen
        elif stream_name == ReceiptsStream.NAME:
            await self._on_new_receipts(rows)

        # ... as well as device updates and messages
        elif stream_name == DeviceListsStream.NAME:
            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
            for host in hosts:
                self.federation_sender.send_device_messages(host)

        elif stream_name == ToDeviceStream.NAME:
            # The to_device stream includes stuff to be pushed to both local
            # clients and remote servers, so we ignore entities that start with
            # '@' (since they'll be local users rather than destinations).
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
            for host in hosts:
                self.federation_sender.send_device_messages(host)

    async def _on_new_receipts(self, rows):
        """
        Args:
            rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]):
                new receipts to be processed
        """
        for receipt in rows:
            # we only want to send on receipts for our own users
            if not self._is_mine_id(receipt.user_id):
                continue
            receipt_info = ReadReceipt(
                receipt.room_id,
                receipt.receipt_type,
                receipt.user_id,
                [receipt.event_id],
                receipt.data,
            )
            await self.federation_sender.send_read_receipt(receipt_info)

    async def update_token(self, token):
        try:
            self.federation_position = token

            # We linearize here to ensure we don't have races updating the token
            with (await self._fed_position_linearizer.queue(None)):
                if self._last_ack < self.federation_position:
                    await self.store.update_federation_out_pos(
                        "federation", self.federation_position
                    )

                    # We ACK this token over replication so that the master can drop
                    # its in memory queues
                    self.replication_client.send_federation_ack(
                        self.federation_position
                    )
                    self._last_ack = self.federation_position
        except Exception:
            logger.exception("Error updating federation stream position")

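update_token above follows a persist-then-ack discipline under a linearizer, so a slow writer can never acknowledge a position that has not been stored, and never overwrite a newer position with an older one. The same pattern in miniature, using asyncio purely for illustration:

import asyncio

_lock = asyncio.Lock()
_last_ack = 0

async def ack_position(position, persist, send_ack):
    global _last_ack
    async with _lock:
        if _last_ack < position:
            await persist(position)  # make it durable first...
            send_ack(position)       # ...then let the master drop its queues
            _last_ack = position
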
def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse worker", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    # For backwards compatibility let any of the old app names be used.
    assert config.worker_app in (
        "synapse.app.appservice",
        "synapse.app.client_reader",
        "synapse.app.event_creator",
        "synapse.app.federation_reader",
        "synapse.app.federation_sender",
        "synapse.app.frontend_proxy",
        "synapse.app.generic_worker",
        "synapse.app.media_repository",
        "synapse.app.pusher",
        "synapse.app.synchrotron",
        "synapse.app.user_dir",
    )

    if config.worker_app == "synapse.app.appservice":
        if config.notify_appservices:
            sys.stderr.write(
                "\nThe appservices must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``notify_appservices: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the appservice to start since they will be disabled in the main config
        config.notify_appservices = True
    else:
        # For other worker types we force this to off.
        config.notify_appservices = False

    if config.worker_app == "synapse.app.pusher":
        if config.start_pushers:
            sys.stderr.write(
                "\nThe pushers must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``start_pushers: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the pushers to start since they will be disabled in the main config
        config.start_pushers = True
    else:
        # For other worker types we force this to off.
        config.start_pushers = False

    if config.worker_app == "synapse.app.user_dir":
        if config.update_user_directory:
            sys.stderr.write(
                "\nThe update_user_directory must be disabled in the main synapse process"
                "\nbefore it can be run in a separate worker."
                "\nPlease add ``update_user_directory: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the user directory updates to start since they will be disabled in the main config
        config.update_user_directory = True
    else:
        # For other worker types we force this to off.
        config.update_user_directory = False

    if config.worker_app == "synapse.app.federation_sender":
        if config.send_federation:
            sys.stderr.write(
                "\nThe send_federation must be disabled in the main synapse process"
                "\nbefore it can be run in a separate worker."
                "\nPlease add ``send_federation: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the federation sender to start since it will be disabled in the main config
        config.send_federation = True
    else:
        # For other worker types we force this to off.
        config.send_federation = False

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    hs = GenericWorkerServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(hs, config, use_worker_options=True)

    hs.setup()

    # Ensure the replication streamer is always started in case we write to any
    # streams. Will no-op if no streams can be written to by this worker.
    hs.get_replication_streamer()

    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, hs, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-generic-worker", config)


if __name__ == "__main__":
    with LoggingContext("main"):
        start(sys.argv[1:])

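Putting the entry point to use: start takes the same command-line options as any Synapse app, so a worker is launched by pointing it at the main config plus a worker config whose worker_app is one of the names asserted above, e.g. (paths are illustrative):

start(["--config-path", "homeserver.yaml", "--config-path", "workers/generic_worker.yaml"])

The remaining hunks below show the matching changes to synapse/app/homeserver.py and the conversion of the legacy single-purpose apps into shims.
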
@@ -69,7 +69,6 @@ from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import IncorrectDatabaseSetup
 from synapse.storage.prepare_database import UpgradeDatabaseException
-from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.module_loader import load_module
@@ -192,6 +191,11 @@ class SynapseHomeServer(HomeServer):
                 }
             )

+        if self.get_config().oidc_enabled:
+            from synapse.rest.oidc import OIDCResource
+
+            resources["/_synapse/oidc"] = OIDCResource(self)
+
         if self.get_config().saml2_enabled:
             from synapse.rest.saml2 import SAML2Resource

@@ -241,16 +245,26 @@ class SynapseHomeServer(HomeServer):
             resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

         if name == "webclient":
-            webclient_path = self.get_config().web_client_location
+            webclient_loc = self.get_config().web_client_location

-            if webclient_path is None:
+            if webclient_loc is None:
                 logger.warning(
                     "Not enabling webclient resource, as web_client_location is unset."
                 )
+            elif webclient_loc.startswith("http://") or webclient_loc.startswith(
+                "https://"
+            ):
+                resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc)
             else:
+                logger.warning(
+                    "Running webclient on the same domain is not recommended: "
+                    "https://github.com/matrix-org/synapse#security-note - "
+                    "after you move webclient to different host you can set "
+                    "web_client_location to its full URL to enable redirection."
+                )
                 # GZip is disabled here due to
                 # https://twistedmatrix.com/trac/ticket/7678
-                resources[WEB_CLIENT_PREFIX] = File(webclient_path)
+                resources[WEB_CLIENT_PREFIX] = File(webclient_loc)

         if name == "metrics" and self.get_config().enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
@@ -263,6 +277,12 @@ class SynapseHomeServer(HomeServer):
     def start_listening(self, listeners):
         config = self.get_config()

+        if config.redis_enabled:
+            # If redis is enabled we connect via the replication command handler
+            # in the same way as the workers (since we're effectively a client
+            # rather than a server).
+            self.get_tcp_replication().start_replication(self)
+
         for listener in listeners:
             if listener["type"] == "http":
                 self._listening_services.extend(self._listener_http(config, listener))
@@ -298,6 +318,11 @@ class SynapseHomeServer(HomeServer):

 # Gauges to expose monthly active user control metrics
 current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
+current_mau_by_service_gauge = Gauge(
+    "synapse_admin_mau_current_mau_by_service",
+    "Current MAU by service",
+    ["app_service"],
+)
 max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
 registered_reserved_users_mau_gauge = Gauge(
     "synapse_admin_mau:registered_reserved_users",
@@ -401,9 +426,15 @@ def setup(config_options):
             # Check if it needs to be reprovisioned every day.
             hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)

+            # Load the OIDC provider metadatas, if OIDC is enabled.
+            if hs.config.oidc_enabled:
+                oidc = hs.get_oidc_handler()
+                # Loading the provider metadata also ensures the provider config is valid.
+                yield defer.ensureDeferred(oidc.load_metadata())
+                yield defer.ensureDeferred(oidc.load_jwks())
+
             _base.start(hs, config.listeners)

             hs.get_pusherpool().start()
             hs.get_datastore().db.updates.start_doing_background_updates()
         except Exception:
             # Print the exception and bail out.
@@ -484,8 +515,8 @@ def phone_stats_home(hs, stats, stats_process=_stats_process):

     daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
     stats["daily_sent_messages"] = daily_sent_messages
-    stats["cache_factor"] = CACHE_SIZE_FACTOR
-    stats["event_cache_size"] = hs.config.event_cache_size
+    stats["cache_factor"] = hs.config.caches.global_factor
+    stats["event_cache_size"] = hs.config.caches.event_cache_size

     #
     # Performance statistics
@@ -585,12 +616,20 @@ def run(hs):
     @defer.inlineCallbacks
     def generate_monthly_active_users():
         current_mau_count = 0
+        current_mau_count_by_service = {}
         reserved_users = ()
         store = hs.get_datastore()
         if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
             current_mau_count = yield store.get_monthly_active_count()
+            current_mau_count_by_service = (
+                yield store.get_monthly_active_count_by_service()
+            )
             reserved_users = yield store.get_registered_reserved_users()
         current_mau_gauge.set(float(current_mau_count))
+
+        for app_service, count in current_mau_count_by_service.items():
+            current_mau_by_service_gauge.labels(app_service).set(float(count))
+
         registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
         max_mau_gauge.set(float(hs.config.max_mau_value))

@@ -13,162 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
 import sys

-from twisted.internet import reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.server import JsonResource
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.server import HomeServer
-from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.media_repository")
-
-
-class MediaRepositorySlavedStore(
-    RoomStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedClientIpStore,
-    SlavedTransactionStore,
-    BaseSlavedStore,
-    MediaRepositoryStore,
-):
-    pass
-
-
-class MediaRepositoryServer(HomeServer):
-    DATASTORE_CLASS = MediaRepositorySlavedStore
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-                elif name == "media":
-                    media_repo = self.get_media_repository_resource()
-
-                    # We need to serve the admin servlets for media on the
-                    # worker.
-                    admin_resource = JsonResource(self, canonical_json=False)
-                    register_servlets_for_media_repo(self, admin_resource)
-
-                    resources.update(
-                        {
-                            MEDIA_PREFIX: media_repo,
-                            LEGACY_MEDIA_PREFIX: media_repo,
-                            "/_synapse/admin": admin_resource,
-                        }
-                    )
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse media repository now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return ReplicationClientHandler(self.get_datastore())
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config(
-            "Synapse media repository", config_options
-        )
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.media_repository"
-
-    if config.enable_media_repo:
-        _base.quit_with_error(
-            "enable_media_repo must be disabled in the main synapse process\n"
-            "before the media repo can be run in a separate worker.\n"
-            "Please add ``enable_media_repo: false`` to the main config\n"
-        )
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    ss = MediaRepositoryServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ss, config, use_worker_options=True)
-
-    ss.setup()
-    reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
-    )
-
-    _base.start_worker_reactor("synapse-media-repository", config)
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext

 if __name__ == "__main__":
     with LoggingContext("main"):
         start(sys.argv[1:])

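The hunk above is the shim pattern applied to all the legacy single-purpose apps in this branch: the entire module body is deleted and the surviving file simply re-exports generic_worker's start, so an invocation such as python -m synapse.app.media_repository keeps working while the logic lives in one place. The pusher and synchrotron diffs that follow apply the same treatment.
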
@@ -13,213 +13,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
 import sys

-from twisted.internet import defer, reactor
-from twisted.web.resource import NoResource
-
-import synapse
-from synapse import events
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
-from synapse.replication.slave.storage._base import __func__
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.server import HomeServer
-from synapse.storage import DataStore
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.pusher")
-
-
-class PusherSlaveStore(
-    SlavedEventStore,
-    SlavedPusherStore,
-    SlavedReceiptsStore,
-    SlavedAccountDataStore,
-    RoomStore,
-):
-    update_pusher_last_stream_ordering_and_success = __func__(
-        DataStore.update_pusher_last_stream_ordering_and_success
-    )
-
-    update_pusher_failing_since = __func__(DataStore.update_pusher_failing_since)
-
-    update_pusher_last_stream_ordering = __func__(
-        DataStore.update_pusher_last_stream_ordering
-    )
-
-    get_throttle_params_by_room = __func__(DataStore.get_throttle_params_by_room)
-
-    set_throttle_params = __func__(DataStore.set_throttle_params)
-
-    get_time_of_last_push_action_before = __func__(
-        DataStore.get_time_of_last_push_action_before
-    )
-
-    get_profile_displayname = __func__(DataStore.get_profile_displayname)
-
-
-class PusherServer(HomeServer):
-    DATASTORE_CLASS = PusherSlaveStore
-
-    def remove_pusher(self, app_id, push_key, user_id):
-        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
-
-    def _listen_http(self, listener_config):
-        port = listener_config["port"]
-        bind_addresses = listener_config["bind_addresses"]
-        site_tag = listener_config.get("tag", port)
-        resources = {}
-        for res in listener_config["resources"]:
-            for name in res["names"]:
-                if name == "metrics":
-                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
-
-        root_resource = create_resource_tree(resources, NoResource())
-
-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-            ),
-        )
-
-        logger.info("Synapse pusher now listening on port %d", port)
-
-    def start_listening(self, listeners):
-        for listener in listeners:
-            if listener["type"] == "http":
-                self._listen_http(listener)
-            elif listener["type"] == "manhole":
-                _base.listen_tcp(
-                    listener["bind_addresses"],
-                    listener["port"],
-                    manhole(
-                        username="matrix", password="rabbithole", globals={"hs": self}
-                    ),
-                )
-            elif listener["type"] == "metrics":
-                if not self.get_config().enable_metrics:
-                    logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
-                    )
-                else:
-                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
-            else:
-                logger.warning("Unrecognized listener type: %s", listener["type"])
-
-        self.get_tcp_replication().start_replication(self)
-
-    def build_tcp_replication(self):
-        return PusherReplicationHandler(self)
-
-
-class PusherReplicationHandler(ReplicationClientHandler):
-    def __init__(self, hs):
-        super(PusherReplicationHandler, self).__init__(hs.get_datastore())
-
-        self.pusher_pool = hs.get_pusherpool()
-
-    async def on_rdata(self, stream_name, token, rows):
-        await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
-        run_in_background(self.poke_pushers, stream_name, token, rows)
-
-    @defer.inlineCallbacks
-    def poke_pushers(self, stream_name, token, rows):
-        try:
-            if stream_name == "pushers":
-                for row in rows:
-                    if row.deleted:
-                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
-                    else:
-                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
-            elif stream_name == "events":
-                yield self.pusher_pool.on_new_notifications(token, token)
-            elif stream_name == "receipts":
-                yield self.pusher_pool.on_new_receipts(
-                    token, token, set(row.room_id for row in rows)
-                )
-        except Exception:
-            logger.exception("Error poking pushers")
-
-    def stop_pusher(self, user_id, app_id, pushkey):
-        key = "%s:%s" % (app_id, pushkey)
-        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
-        pusher = pushers_for_user.pop(key, None)
-        if pusher is None:
-            return
-        logger.info("Stopping pusher %r / %r", user_id, key)
-        pusher.on_stop()
-
-    def start_pusher(self, user_id, app_id, pushkey):
-        key = "%s:%s" % (app_id, pushkey)
-        logger.info("Starting pusher %r / %r", user_id, key)
-        return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
-
-
-def start(config_options):
-    try:
-        config = HomeServerConfig.load_config("Synapse pusher", config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    assert config.worker_app == "synapse.app.pusher"
-
-    events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    if config.start_pushers:
-        sys.stderr.write(
-            "\nThe pushers must be disabled in the main synapse process"
-            "\nbefore they can be run in a separate worker."
-            "\nPlease add ``start_pushers: false`` to the main config"
-            "\n"
-        )
-        sys.exit(1)
-
-    # Force the pushers to start since they will be disabled in the main config
-    config.start_pushers = True
-
-    ps = PusherServer(
-        config.server_name,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-    )
-
-    setup_logging(ps, config, use_worker_options=True)
-
-    ps.setup()
-
-    def start():
-        _base.start(ps, config.worker_listeners)
-        ps.get_pusherpool().start()
-
-    reactor.addSystemEventTrigger("before", "startup", start)
-
-    _base.start_worker_reactor("synapse-pusher", config)
+from synapse.app.generic_worker import start
+from synapse.util.logcontext import LoggingContext

 if __name__ == "__main__":
     with LoggingContext("main"):
-        ps = start(sys.argv[1:])
+        start(sys.argv[1:])

@@ -13,454 +13,11 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import contextlib
|
||||
import logging
|
||||
|
||||
import sys
|
||||
|
||||
from six import iteritems
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.web.resource import NoResource
|
||||
|
||||
import synapse
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.app import _base
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.logging.context import LoggingContext, run_in_background
|
||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
||||
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||
from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
|
||||
from synapse.rest.client.v1 import events
|
||||
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
|
||||
from synapse.rest.client.v2_alpha import sync
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.data_stores.main.monthly_active_users import (
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
)
|
||||
from synapse.storage.data_stores.main.presence import UserPresenceState
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.stringutils import random_string
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
logger = logging.getLogger("synapse.app.synchrotron")
|
||||
|
||||
|
||||
class SynchrotronSlavedStore(
|
||||
SlavedReceiptsStore,
|
||||
SlavedAccountDataStore,
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
SlavedFilteringStore,
|
||||
SlavedPresenceStore,
|
||||
SlavedGroupServerStore,
|
||||
SlavedDeviceInboxStore,
|
||||
SlavedDeviceStore,
|
||||
SlavedPushRuleStore,
|
||||
SlavedEventStore,
|
||||
SlavedClientIpStore,
|
||||
RoomStore,
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
BaseSlavedStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
UPDATE_SYNCING_USERS_MS = 10 * 1000
|
||||
|
||||
|
||||
class SynchrotronPresence(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.http_client = hs.get_simple_http_client()
|
||||
self.store = hs.get_datastore()
|
||||
self.user_to_num_current_syncs = {}
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
active_presence = self.store.take_presence_startup_info()
|
||||
self.user_to_current_state = {state.user_id: state for state in active_presence}
|
||||
|
||||
# user_id -> last_sync_ms. Lists the users that have stopped syncing
|
||||
# but we haven't notified the master of that yet
|
||||
self.users_going_offline = {}
|
||||
|
||||
self._send_stop_syncing_loop = self.clock.looping_call(
|
||||
self.send_stop_syncing, 10 * 1000
|
||||
)
|
||||
|
||||
self.process_id = random_string(16)
|
||||
logger.info("Presence process_id is %r", self.process_id)
|
||||
|
||||
def send_user_sync(self, user_id, is_syncing, last_sync_ms):
|
||||
if self.hs.config.use_presence:
|
||||
self.hs.get_tcp_replication().send_user_sync(
|
||||
user_id, is_syncing, last_sync_ms
|
||||
)
|
||||
|
||||
def mark_as_coming_online(self, user_id):
|
||||
"""A user has started syncing. Send a UserSync to the master, unless they
|
||||
had recently stopped syncing.
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
"""
|
||||
going_offline = self.users_going_offline.pop(user_id, None)
|
||||
if not going_offline:
|
||||
# Safe to skip because we haven't yet told the master they were offline
|
||||
self.send_user_sync(user_id, True, self.clock.time_msec())
|
||||
|
||||
def mark_as_going_offline(self, user_id):
|
||||
"""A user has stopped syncing. We wait before notifying the master as
|
||||
its likely they'll come back soon. This allows us to avoid sending
|
||||
a stopped syncing immediately followed by a started syncing notification
|
||||
to the master
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
"""
|
||||
self.users_going_offline[user_id] = self.clock.time_msec()
|
||||
|
||||
def send_stop_syncing(self):
|
||||
"""Check if there are any users who have stopped syncing a while ago
|
||||
and haven't come back yet. If there are poke the master about them.
|
||||
"""
|
||||
now = self.clock.time_msec()
|
||||
for user_id, last_sync_ms in list(self.users_going_offline.items()):
|
||||
if now - last_sync_ms > 10 * 1000:
|
||||
self.users_going_offline.pop(user_id, None)
|
||||
self.send_user_sync(user_id, False, last_sync_ms)
|
||||
|
||||
def set_state(self, user, state, ignore_status_msg=False):
|
||||
# TODO Hows this supposed to work?
|
||||
return defer.succeed(None)
|
||||
|
||||
get_states = __func__(PresenceHandler.get_states)
|
||||
get_state = __func__(PresenceHandler.get_state)
|
||||
current_state_for_users = __func__(PresenceHandler.current_state_for_users)
|
||||
|
||||
def user_syncing(self, user_id, affect_presence):
|
||||
if affect_presence:
|
||||
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
|
||||
self.user_to_num_current_syncs[user_id] = curr_sync + 1
|
||||
|
||||
# If we went from no in flight sync to some, notify replication
|
||||
if self.user_to_num_current_syncs[user_id] == 1:
|
||||
self.mark_as_coming_online(user_id)
|
||||
|
||||
def _end():
|
||||
# We check that the user_id is in user_to_num_current_syncs because
|
||||
# user_to_num_current_syncs may have been cleared if we are
|
||||
# shutting down.
|
||||
if affect_presence and user_id in self.user_to_num_current_syncs:
|
||||
self.user_to_num_current_syncs[user_id] -= 1
|
||||
|
||||
# If we went from one in flight sync to non, notify replication
|
||||
if self.user_to_num_current_syncs[user_id] == 0:
|
||||
self.mark_as_going_offline(user_id)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _user_syncing():
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
_end()
|
||||
|
||||
return defer.succeed(_user_syncing())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify_from_replication(self, states, stream_id):
|
||||
parties = yield get_interested_parties(self.store, states)
|
||||
room_ids_to_states, users_to_states = parties
|
||||
|
||||
self.notifier.on_new_event(
|
||||
"presence_key",
|
||||
stream_id,
|
||||
rooms=room_ids_to_states.keys(),
|
||||
users=users_to_states.keys(),
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_replication_rows(self, token, rows):
|
||||
states = [
|
||||
UserPresenceState(
|
||||
row.user_id,
|
||||
row.state,
|
||||
row.last_active_ts,
|
||||
row.last_federation_update_ts,
|
||||
row.last_user_sync_ts,
|
||||
row.status_msg,
|
||||
row.currently_active,
|
||||
)
|
||||
for row in rows
|
||||
]
|
||||
|
||||
for state in states:
|
||||
self.user_to_current_state[state.user_id] = state
|
||||
|
||||
stream_id = token
|
||||
yield self.notify_from_replication(states, stream_id)
|
||||
|
||||
def get_currently_syncing_users(self):
|
||||
if self.hs.config.use_presence:
|
||||
return [
|
||||
user_id
|
||||
for user_id, count in iteritems(self.user_to_num_current_syncs)
|
||||
if count > 0
|
||||
]
|
||||
else:
|
||||
return set()
|
||||
|
||||
|
||||
class SynchrotronTyping(object):
    def __init__(self, hs):
        self._latest_room_serial = 0
        self._reset()

    def _reset(self):
        """Reset the typing handler's data caches."""
        # map room IDs to serial numbers
        self._room_serials = {}
        # map room IDs to sets of users currently typing
        self._room_typing = {}

    def stream_positions(self):
        # We must update this typing token from the response of the previous
        # sync. In particular, the stream id may "reset" back to zero/a low
        # value, which we *must* use for the next replication request.
        return {"typing": self._latest_room_serial}

    def process_replication_rows(self, token, rows):
        if self._latest_room_serial > token:
            # The master has gone backwards. To prevent inconsistent data, just
            # clear everything.
            self._reset()

        # Set the latest serial token to whatever the server gave us.
        self._latest_room_serial = token

        for row in rows:
            self._room_serials[row.room_id] = token
            self._room_typing[row.room_id] = row.user_ids
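# Editor's sketch: why the token must be allowed to go backwards. If the master
# restarts, typing serials restart from zero; replaying with a stale high token
# would silently drop rows, so the handler wipes its caches instead:
typing = SynchrotronTyping(hs=None)  # hs is unused in __init__
typing.process_replication_rows(token=5, rows=[])
typing.process_replication_rows(token=2, rows=[])  # master went backwards
assert typing._latest_room_serial == 2 and typing._room_serials == {}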
class SynchrotronApplicationService(object):
    def notify_interested_services(self, event):
        pass
class SynchrotronServer(HomeServer):
    DATASTORE_CLASS = SynchrotronSlavedStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    RoomInitialSyncRestServlet(self).register(resource)
                    resources.update(
                        {
                            "/_matrix/client/r0": resource,
                            "/_matrix/client/unstable": resource,
                            "/_matrix/client/v2_alpha": resource,
                            "/_matrix/client/api/v1": resource,
                        }
                    )

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse synchrotron now listening on port %d", port)
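# Editor's sketch (values illustrative): the shape of the listener_config dict
# consumed above, as it would appear under worker_listeners in the worker's
# YAML config:
listener_config = {
    "type": "http",
    "port": 8083,
    "bind_addresses": ["127.0.0.1"],
    "tag": "client",
    "resources": [{"names": ["client"]}],
}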
    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        "Metrics listener configured, but enable_metrics is not True!"
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)
    def build_tcp_replication(self):
        return SyncReplicationHandler(self)

    def build_presence_handler(self):
        return SynchrotronPresence(self)

    def build_typing_handler(self):
        return SynchrotronTyping(self)
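# Editor's note (illustrative; not this codebase's literal implementation):
# HomeServer wires dependencies by naming convention, so overriding build_foo
# in a subclass changes what the cached get_foo accessor hands out, roughly:
#
#     def get_typing_handler(self):
#         if "typing_handler" not in self._instances:
#             self._instances["typing_handler"] = self.build_typing_handler()
#         return self._instances["typing_handler"]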
class SyncReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(SyncReplicationHandler, self).__init__(hs.get_datastore())

        self.store = hs.get_datastore()
        self.typing_handler = hs.get_typing_handler()
        # NB this is a SynchrotronPresence, not a normal PresenceHandler
        self.presence_handler = hs.get_presence_handler()
        self.notifier = hs.get_notifier()

    async def on_rdata(self, stream_name, token, rows):
        await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.process_and_notify, stream_name, token, rows)
    def get_streams_to_replicate(self):
        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
        args.update(self.typing_handler.stream_positions())
        return args

    def get_currently_syncing_users(self):
        return self.presence_handler.get_currently_syncing_users()
    async def process_and_notify(self, stream_name, token, rows):
        try:
            if stream_name == "events":
                # We shouldn't get multiple rows per token for the events
                # stream, so we don't need to optimise this for multiple rows.
                for row in rows:
                    if row.type != EventsStreamEventRow.TypeId:
                        continue
                    assert isinstance(row, EventsStreamRow)

                    event = await self.store.get_event(
                        row.data.event_id, allow_rejected=True
                    )
                    if event.rejected_reason:
                        continue

                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    max_token = self.store.get_room_max_stream_ordering()
                    self.notifier.on_new_room_event(
                        event, token, max_token, extra_users
                    )
            elif stream_name == "push_rules":
                self.notifier.on_new_event(
                    "push_rules_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name in ("account_data", "tag_account_data"):
                self.notifier.on_new_event(
                    "account_data_key", token, users=[row.user_id for row in rows]
                )
            elif stream_name == "receipts":
                self.notifier.on_new_event(
                    "receipt_key", token, rooms=[row.room_id for row in rows]
                )
            elif stream_name == "typing":
                self.typing_handler.process_replication_rows(token, rows)
                self.notifier.on_new_event(
                    "typing_key", token, rooms=[row.room_id for row in rows]
                )
            elif stream_name == "to_device":
                entities = [row.entity for row in rows if row.entity.startswith("@")]
                if entities:
                    self.notifier.on_new_event("to_device_key", token, users=entities)
            elif stream_name == "device_lists":
                all_room_ids = set()
                for row in rows:
                    room_ids = await self.store.get_rooms_for_user(row.user_id)
                    all_room_ids.update(room_ids)
                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
            elif stream_name == "presence":
                await self.presence_handler.process_replication_rows(token, rows)
            elif stream_name == "groups":
                self.notifier.on_new_event(
                    "groups_key", token, users=[row.user_id for row in rows]
                )
        except Exception:
            logger.exception("Error processing replication")
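# Editor's summary sketch (illustrative) of the simple stream-to-notifier
# dispatch above; "events", "typing", "device_lists" and "presence" need extra
# handling and are omitted. Note the last branch reads "groups": the original
# had a second, unreachable "receipts" test guarding the "groups_key" event.
SIMPLE_STREAMS = {
    "push_rules": ("push_rules_key", "users"),
    "account_data": ("account_data_key", "users"),
    "tag_account_data": ("account_data_key", "users"),
    "receipts": ("receipt_key", "rooms"),
    "groups": ("groups_key", "users"),
}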
def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse synchrotron", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.synchrotron"

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = SynchrotronServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        application_service_handler=SynchrotronApplicationService(),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-synchrotron", config)
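# Editor's note (paths illustrative): a worker like this is launched with the
# main homeserver config plus a worker config, e.g.
#
#     python -m synapse.app.synchrotron -c homeserver.yaml -c synchrotron.yaml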
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -14,217 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams.events import (
    EventsStream,
    EventsStreamCurrentStateRow,
)
from synapse.rest.client.v2_alpha import user_directory
from synapse.server import HomeServer
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.database import Database
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.user_dir")
class UserDirectorySlaveStore(
    SlavedEventStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    UserDirectoryStore,
    BaseSlavedStore,
):
    def __init__(self, database: Database, db_conn, hs):
        super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs)

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
            db_conn,
            "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream id with the events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache",
            min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )
    def stream_positions(self):
        result = super(UserDirectorySlaveStore, self).stream_positions()
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == EventsStream.NAME:
            self._stream_id_gen.advance(token)
            for row in rows:
                if row.type != EventsStreamCurrentStateRow.TypeId:
                    continue
                self._curr_state_delta_stream_cache.entity_has_changed(
                    row.data.room_id, token
                )
        return super(UserDirectorySlaveStore, self).process_replication_rows(
            stream_name, token, rows
        )
class UserDirectoryServer(HomeServer):
    DATASTORE_CLASS = UserDirectorySlaveStore

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    user_directory.register_servlets(self, resource)
                    resources.update(
                        {
                            "/_matrix/client/r0": resource,
                            "/_matrix/client/unstable": resource,
                            "/_matrix/client/v2_alpha": resource,
                            "/_matrix/client/api/v1": resource,
                        }
                    )

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
        )

        logger.info("Synapse user_dir now listening on port %d", port)
    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warning(
                        "Metrics listener configured, but enable_metrics is not True!"
                    )
                else:
                    _base.listen_metrics(listener["bind_addresses"], listener["port"])
            else:
                logger.warning("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return UserDirectoryReplicationHandler(self)
class UserDirectoryReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    async def on_rdata(self, stream_name, token, rows):
        await super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == EventsStream.NAME:
            run_in_background(self._notify_directory)

    @defer.inlineCallbacks
    def _notify_directory(self):
        try:
            yield self.user_directory.notify_new_event()
        except Exception:
            logger.exception("Error notifying user directory of state update")
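# Editor's note: notify_new_event is kicked off via run_in_background rather
# than awaited, so replication processing is never blocked behind a user
# directory rebuild; any failure is contained by the try/except above.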
def start(config_options):
    try:
        config = HomeServerConfig.load_config("Synapse user directory", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.user_dir"

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.update_user_directory:
        sys.stderr.write(
            "\nupdate_user_directory must be disabled in the main synapse process"
            "\nbefore it can be run in a separate worker."
            "\nPlease add ``update_user_directory: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the user directory updates to start, since they will be disabled in
    # the main config
    config.update_user_directory = True

    ss = UserDirectoryServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(ss, config, use_worker_options=True)

    ss.setup()
    reactor.addSystemEventTrigger(
        "before", "startup", _base.start, ss, config.worker_listeners
    )

    _base.start_worker_reactor("synapse-user-dir", config)
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext

if __name__ == "__main__":
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -294,7 +294,6 @@ class RootConfig(object):
        report_stats=None,
        open_private_ports=False,
        listeners=None,
        database_conf=None,
        tls_certificate_path=None,
        tls_private_key_path=None,
        acme_domain=None,
@@ -367,7 +366,6 @@ class RootConfig(object):
            report_stats=report_stats,
            open_private_ports=open_private_ports,
            listeners=listeners,
            database_conf=database_conf,
            tls_certificate_path=tls_certificate_path,
            tls_private_key_path=tls_private_key_path,
            acme_domain=acme_domain,
@@ -470,8 +468,8 @@ class RootConfig(object):

        Returns: Config object, or None if --generate-config or --generate-keys was set
        """
        config_parser = argparse.ArgumentParser(add_help=False)
        config_parser.add_argument(
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument(
            "-c",
            "--config-path",
            action="append",
@@ -480,7 +478,7 @@ class RootConfig(object):
            " may specify directories containing *.yaml files.",
        )

        generate_group = config_parser.add_argument_group("Config generation")
        generate_group = parser.add_argument_group("Config generation")
        generate_group.add_argument(
            "--generate-config",
            action="store_true",
@@ -528,12 +526,13 @@ class RootConfig(object):
            ),
        )

        config_args, remaining_args = config_parser.parse_known_args(argv)
        cls.invoke_all_static("add_arguments", parser)
        config_args = parser.parse_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        if not config_files:
            config_parser.error(
            parser.error(
                "Must supply a config file.\nA config file can be automatically"
                ' generated using "--generate-config -H SERVER_NAME'
                ' -c CONFIG-FILE"'
@@ -552,7 +551,7 @@ class RootConfig(object):

        if config_args.generate_config:
            if config_args.report_stats is None:
                config_parser.error(
                parser.error(
                    "Please specify either --report-stats=yes or --report-stats=no\n\n"
                    + MISSING_REPORT_STATS_SPIEL
                )
@@ -611,15 +610,6 @@ class RootConfig(object):
            )
            generate_missing_configs = True

        parser = argparse.ArgumentParser(
            parents=[config_parser],
            description=description,
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )

        obj.invoke_all_static("add_arguments", parser)
        args = parser.parse_args(remaining_args)

        config_dict = read_config_files(config_files)
        if generate_missing_configs:
            obj.generate_missing_files(config_dict, config_dir_path)
@@ -628,7 +618,7 @@ class RootConfig(object):
        obj.parse_config_dict(
            config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path
        )
        obj.invoke_all("read_arguments", args)
        obj.invoke_all("read_arguments", config_args)

        return obj
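# Editor's note on the hunks above (reading the paired old/new lines): the old
# two-pass scheme, which parsed known args with a bare config_parser and then
# built a second parser with parents=[config_parser] for per-module arguments,
# is collapsed into a single parser that registers all module arguments via
# invoke_all_static("add_arguments", parser) before one parse_args(argv) call.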
@@ -667,6 +657,12 @@ def read_config_files(config_files):
    for config_file in config_files:
        with open(config_file) as file_stream:
            yaml_config = yaml.safe_load(file_stream)

        if not isinstance(yaml_config, dict):
            err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
            print(err % (config_file,))
            continue

        specified_config.update(yaml_config)

    if "server_name" not in specified_config:
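# Editor's sketch of what the isinstance guard added above catches:
# yaml.safe_load returns None for an empty file and a plain scalar for a bare
# string, and only a mapping can be merged into the config dict:
import yaml

assert yaml.safe_load("") is None
assert yaml.safe_load("just a string") == "just a string"
assert isinstance(yaml.safe_load("server_name: example.com"), dict)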
@@ -13,6 +13,7 @@ from synapse.config import (
    key,
    logger,
    metrics,
    oidc_config,
    password,
    password_auth_providers,
    push,
@@ -59,6 +60,7 @@ class RootConfig:
    saml2: saml2_config.SAML2Config
    cas: cas.CasConfig
    sso: sso.SSOConfig
    oidc: oidc_config.OIDCConfig
    jwt: jwt_config.JWTConfig
    password: password.PasswordConfig
    email: emailconfig.EmailConfig