Compare commits: `v1.14.0...travis/gro` (436 commits)
Commits (SHA1):

```
b024acffea acfb7c3b5d 3c01724b33 5cf7c12995 408aef8276 2f4d60a5ba 25e55d2598 8b6c176aee
050e20e7ca e04e465b4d 8390e00c7f 3234d5c305 ea4e4d2f0b ad6190c925 ac77cdb64e b069b78bb4
e8861957d9 dc22090a67 6b7ce1d332 894dae74fe 7bdf9828d5 bfd79c2988 93848f3c89 4550b77312
a69ba6f457 091ca3910d 53834bb9c4 ff0e894656 dd8f28bd3f fbe930dad2 5ecc8b5825 5dd73d029e
d68e10f308 a3a59bab7b 9d1e4942ab 6ba621d786 04faa0bfa9 a0acdfa9e9 fdb46b5442 c066928915
61d8ff0d44 3c796e4159 a1e9bb9eae 8a3dac3c19 e1af09dccb 0304ad0c3d a0f574f3c2 db131b6b22
64e5bb0dc8 0f1afbe8dc 0cb169900e aa827b6ad7 39c3f68758 fcbab08cbd cdbb8e6d6e 5c43c43240
1a3aabcf3f cee6c6012e 7f837959ea f3fe6961b2 1048ed2afa de6f892065 2f9fd5ab00 4e874ed593
7620912d84 4dd27e6d11 367e9e6e9e bf33d5c457 2ffd6783c7 fe6cfc80ec d4a7829b12 c36228c403
66f24449dd 118a9eafb3 dd11f575a2 079bc3c8e3 a7bdf98d01 0a86850ba3 8b786db323 7cac9006d6
8ff2deda72 88a3ff12f0 e19de43eb5 916cf2d439 481f76c7aa 5d92a1428c 6812509807 2a89ce8cd4
b6c6fb7950 3b415e23a5 db5970ac6d e2f1cccc8a 1678057b56 d1008fe949 394be6a0e6 faba873d4b
9b3ab57acd 18de00adb4 cf42d0a60c 79d991eff0 713d70d6c6 e2a4ba6f9b 60328ce9fb 69158e554f
8b04c4cd70 6d4b790021 0a7fb24716 606805bf06 3aa36b782c c978f6c451 4cce8ef74e b3a97d6dac
320ef98852 3950ae51ef fc0ef72d9c a9631b7b4b 2c1b9d6763 a53e0160a2 d90087cffa 3a00bd1378
f23c77389d 8dff4a1242 2184f61fae 3345c166a4 e866e3b896 9725c59247 8a25332d94 2c1e1b153d
8078dec3be 3857de2194 349119a340 7000a215e6 a8f7ed28c6 aaf9ce72a0 c4ce0da6fe 68626ff8e9
f57b99af22 8553f46498 5f65e62681 8144bc26a7 7c2e2c2077 f88c48f3b8 1ef9efc1e0 84d099ae11
d8a9cd8d3e c4268e3da6 3fc8fdd150 b975fa2e99 e739b20588 53f7b49f5b 5ea29d7f85 6a080ea184
1ec688bf21 fefe9943ef 83434df381 7078866969 4876af06dd ff22672fd6 68cd935826 13d77464c9
cc9bb3dc3f a4cf94a3c2 55f2617f8c 923c995023 b74919c72e 931b026844 05060e0223 15997618e2
2ccd48e921 de119063f2 759481af6d b7ddece2a6 5662e2b0f3 64d2280299 a7b06a81f0 5ecf98f59e
438020732e f2af3e4fc5 d1d5fa66e4 1ec2961b3b a5545cf86d 2d2acc1cf2 a3ad045286 852930add7
4642fd66df 6b3ac3b8cd 00e57b755c 6fca1b3506 fff483ea96 f460da6031 b0f031f92a e5300063ed
346476df21 f2e38ca867 649a7ead5c a827838706 a973bcb8a4 16368c8a34 c445bc0cad 3c36ae17a5
42509b8fb6 90b0cdda42 12528dc42f 35450519de a57df9b827 97e1159ac1 8c7d0f163d 9006e125af
62352c3a1b 3032b54ac9 3a3a618460 f13061d515 b11450dedc 111e70d75c 1d9dca02f9 8d0097bef1
85223106f3 491f0dab1b 77d2c05410 4db1509516 93c8b077ed f886a69916 457096e6df 504c8f3483
fa361c8f65 59e64b6d5b 29df3d0e9f 66a4af8d96 d9e47af617 1bca21e1da 6cef918a4b 8ccb7f08d9
f299441cc6 f1245dc3c0 e29c44340b e66e38bbd7 b1beb3ff59 e6fbb0c121 c9f7c683ae cbabcec05c
43726783e4 38e1fac886 53ee214f2f 8ca39bd2c3 08c5181a8d 8fa7fdd4cb 2ab0b021f1 67593b1728
ef5ed5292b e7efd8f827 ff0680f69d e0c0129693 59ddcd790b e7f880ce7e 98894341e7 96bb01d8ec
76dbd7b8d6 67d7756fcf d378c3da78 2a266f4511 6d687ebba1 57feeab364 4e118742ca 62b1ce8539
5cdca53aa0 21a212f8e5 8097659f6e f3e0f16240 6f238a7074 1a76cdf8d4 1319e53251 f2bcc6ecbf
4d978d7db4 fedb632d0a 244649b7d5 5ae0a4cf76 1d61a24f42 e8c36e527d 96e9afe625 ea26e9a98b
e5808c4cfb e866512367 f01e2ca039 a6eae69ffe 1e03513f9a 244dbb04f7 8718021469 70e506f0aa
dc80a0762d 74d3e177f0 71cccf1593 a99658074d 2f6afdd8b4 831b31e563 177b2d0c19 b099ef07d6
0e0a2817a2 6920e58136 8bbe87f42d 24110255cd 95e41f368b e060bf4462 91e886d615 1b1489ff18
7d2824395f e35d44c01d 3630825612 96bc110a68 5a5cf6460e 6418b0379f e07a8caf58 b44bdd7f7b
434716e1d3 890c0c041d 46613aaf79 e452973fd2 f6f7511a4c 231252516c 5c5516f80e ac51bd581a
a3f11567d9 98c4e35e3c 03619324fc 789606577a 0fc5575c5b 65eb078498 3e6b5bba71 cc32fa7358
2b2344652b b8ee03caff 356243f08a 4241a10673 6efb2b0ad4 c2b4621630 6d5985e1f2 7d2532be36
bd6dc17221 fed493c5fd 2d11ea385c d0a43d431e e186c660b1 e47e5a2dcd 1e5a50302f 9549d557ea
cf92fbb8aa 7e80c84902 6b1fa3293d 63d9a00bf1 2a07c5ded6 3cc7f43e8d a3fbc23c39 cb6d4d07b1
803291728c 34fd1f7ab5 d0f095625c ce74a6685d ea8f6e611b 1ad06ee6eb 3b3f327a0d b9df7f70bb
c746889bb0 9dbd006607 243f0ba6ce df3323a7cf 0df618f813 aad40e38e1 476a89707a c7b99a1180
fcd6961441 ef345c5a7b 191dc98f80 6f6a4bfc07 ec0a7b9034 1cd67790b9 737530a000 3e8c8547e1
236d2d699d 2dc9468c27 8587b0426f 664409b169 3c45a78090 375ca0cceb 737b4a936e 09099313e6
1bc00fd76d a0d2d81cf9 eea124370b b4f8dcb4bd f1e61ef85c 908f9e2d24 2970ce8367 02f345d053
139bc86f3d e55ee7c32f f4e6495b5d c389bfb6ea f8b9ead3ee 11de843626 e91abfd291 86d814cdde
0188daf32c c9507be989 11dc2b4698 38d4ebbac7 2a8ed93bd4 3820c24836 38c1fdb14e 1bbc9e2df6
816589b09a 3e557447cb 25e2d193e3 fe434cd3c9 33c39ab93c 901b1fa561 df8a3cef6b 6af9cdca24
c1bdd4fac7 2dc430d36e 91a7c5ff6d cb495f526d f5353eff21 47db2c3673 5cb470b495 8c5f88fa4d
ef3934ec8f 3d7f1b53d9 a72d5f39db a6a40a1519 35c308731d c4a820b32a 5af572ada0 0a6e837aaa
98483890ee ef884f6d04 dd8e24f42e 2292dc35fc
```
```diff
@@ -4,18 +4,16 @@ jobs:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
-      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
   dockerhubuploadlatest:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:latest
-      - run: docker push matrixdotorg/synapse:latest-py3

 workflows:
   version: 2
```
`.github/ISSUE_TEMPLATE/BUG_REPORT.md` (vendored, 4 changes)
```diff
@@ -4,12 +4,12 @@ about: Create a report to help us improve

 ---

 <!--

 **THIS IS NOT A SUPPORT CHANNEL!**
 **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
 please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)

 <!--

+If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/

 This is a bug report template. By following the instructions below and
```
`CHANGES.md` (488 changes; new changelog entries added at the top of the file)
@@ -1,3 +1,489 @@

For the next release
====================

Removal warning
---------------

Some older clients used a
[disallowed character](https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-register-email-requesttoken)
(`:`) in the `client_secret` parameter of various endpoints. The incorrect
behaviour was allowed for backwards compatibility, but is now being removed
from Synapse as most users have updated their client. Further context can be
found at [\#6766](https://github.com/matrix-org/synapse/issues/6766).
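
As a concrete illustration (all values here are hypothetical; the endpoint and the `client_secret` grammar come from the spec section linked above), a compliant request keeps `:` out of the secret:

```
POST /_matrix/client/r0/register/email/requestToken

{
    "client_secret": "monkeys_are_GREAT.123",
    "email": "user@example.com",
    "send_attempt": 1
}
```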

Synapse 1.19.0 (2020-08-17)
===========================

No significant changes since 1.19.0rc1.

Removal warning
---------------

As outlined in the [previous release](https://github.com/matrix-org/synapse/releases/tag/v1.18.0), we are no longer publishing Docker images with the `-py3` tag suffix. On top of that, we have also removed the `latest-py3` tag. Please see [the announcement in the upgrade notes for 1.18.0](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180).


Synapse 1.19.0rc1 (2020-08-13)
==============================

Features
--------

- Add option to allow server admins to join rooms which fail complexity checks. Contributed by @lugino-emeritus. ([\#7902](https://github.com/matrix-org/synapse/issues/7902))
- Add an option to the delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`) controlling whether the room is purged. Contributed by @dklimpel. ([\#7964](https://github.com/matrix-org/synapse/issues/7964))
- Add rate limiting to users joining rooms. ([\#8008](https://github.com/matrix-org/synapse/issues/8008))
- Add a `/health` endpoint to every configured HTTP listener that can be used as a health check endpoint by load balancers (see the example after this list). ([\#8048](https://github.com/matrix-org/synapse/issues/8048))
- Allow login to be blocked based on the values of SAML attributes. ([\#8052](https://github.com/matrix-org/synapse/issues/8052))
- Allow guest access to the `GET /_matrix/client/r0/rooms/{room_id}/members` endpoint, according to MSC2689. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#7314](https://github.com/matrix-org/synapse/issues/7314))
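
A sketch of a health probe against the new endpoint (the host and port are assumptions; adjust them to your listener configuration):

```sh
curl -fsS http://localhost:8008/health
# A healthy listener responds 200 with the body "OK"
```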

Bugfixes
--------

- Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977))
- Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978))
- Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. ([\#7980](https://github.com/matrix-org/synapse/issues/7980))
- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
- Fix a long standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999))
- Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012))


Updates to the Docker image
---------------------------

- We no longer publish Docker images with the `-py3` tag suffix, as [announced in the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180). ([\#8056](https://github.com/matrix-org/synapse/issues/8056))


Improved Documentation
----------------------

- Document how to set up a client .well-known file (see the example after this list) and fix several pieces of outdated documentation. ([\#7899](https://github.com/matrix-org/synapse/issues/7899))
- Improve workers docs. ([\#7990](https://github.com/matrix-org/synapse/issues/7990), [\#8000](https://github.com/matrix-org/synapse/issues/8000))
- Fix typo in `docs/workers.md`. ([\#7992](https://github.com/matrix-org/synapse/issues/7992))
- Add documentation for how to undo a room shutdown. ([\#7998](https://github.com/matrix-org/synapse/issues/7998), [\#8010](https://github.com/matrix-org/synapse/issues/8010))
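
For reference (the domains are placeholders), a minimal client `.well-known` file served from `https://example.com/.well-known/matrix/client` looks like:

```json
{
    "m.homeserver": {
        "base_url": "https://matrix.example.com"
    }
}
```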

Internal Changes
----------------

- Reduce the amount of whitespace in JSON stored and sent in responses. Contributed by David Vo. ([\#7372](https://github.com/matrix-org/synapse/issues/7372))
- Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0. ([\#7936](https://github.com/matrix-org/synapse/issues/7936), [\#7979](https://github.com/matrix-org/synapse/issues/7979))
- Convert various parts of the codebase to async/await. ([\#7947](https://github.com/matrix-org/synapse/issues/7947), [\#7948](https://github.com/matrix-org/synapse/issues/7948), [\#7949](https://github.com/matrix-org/synapse/issues/7949), [\#7951](https://github.com/matrix-org/synapse/issues/7951), [\#7963](https://github.com/matrix-org/synapse/issues/7963), [\#7973](https://github.com/matrix-org/synapse/issues/7973), [\#7975](https://github.com/matrix-org/synapse/issues/7975), [\#7976](https://github.com/matrix-org/synapse/issues/7976), [\#7981](https://github.com/matrix-org/synapse/issues/7981), [\#7987](https://github.com/matrix-org/synapse/issues/7987), [\#7989](https://github.com/matrix-org/synapse/issues/7989), [\#8003](https://github.com/matrix-org/synapse/issues/8003), [\#8014](https://github.com/matrix-org/synapse/issues/8014), [\#8016](https://github.com/matrix-org/synapse/issues/8016), [\#8027](https://github.com/matrix-org/synapse/issues/8027), [\#8031](https://github.com/matrix-org/synapse/issues/8031), [\#8032](https://github.com/matrix-org/synapse/issues/8032), [\#8035](https://github.com/matrix-org/synapse/issues/8035), [\#8042](https://github.com/matrix-org/synapse/issues/8042), [\#8044](https://github.com/matrix-org/synapse/issues/8044), [\#8045](https://github.com/matrix-org/synapse/issues/8045), [\#8061](https://github.com/matrix-org/synapse/issues/8061), [\#8062](https://github.com/matrix-org/synapse/issues/8062), [\#8063](https://github.com/matrix-org/synapse/issues/8063), [\#8066](https://github.com/matrix-org/synapse/issues/8066), [\#8069](https://github.com/matrix-org/synapse/issues/8069), [\#8070](https://github.com/matrix-org/synapse/issues/8070))
- Move some database-related log lines from the default logger to the database/transaction loggers. ([\#7952](https://github.com/matrix-org/synapse/issues/7952))
- Add a script to detect source code files using non-unix line terminators. ([\#7965](https://github.com/matrix-org/synapse/issues/7965), [\#7970](https://github.com/matrix-org/synapse/issues/7970))
- Log the SAML session ID during creation. ([\#7971](https://github.com/matrix-org/synapse/issues/7971))
- Implement new experimental push rules for some users. ([\#7997](https://github.com/matrix-org/synapse/issues/7997))
- Remove redundant and unreliable signature check for v1 Identity Service lookup responses. ([\#8001](https://github.com/matrix-org/synapse/issues/8001))
- Improve the performance of the register endpoint. ([\#8009](https://github.com/matrix-org/synapse/issues/8009))
- Reduce less useful output in the newsfragment CI step. Add a link to the changelog section of the contributing guide on error. ([\#8024](https://github.com/matrix-org/synapse/issues/8024))
- Rename storage layer objects to be more sensible. ([\#8033](https://github.com/matrix-org/synapse/issues/8033))
- Change the default log config to reduce disk I/O and storage for new servers. ([\#8040](https://github.com/matrix-org/synapse/issues/8040))
- Add an assertion on `prev_events` in `create_new_client_event`. ([\#8041](https://github.com/matrix-org/synapse/issues/8041))
- Add a comment to `ServerContextFactory` about the use of `SSLv23_METHOD`. ([\#8043](https://github.com/matrix-org/synapse/issues/8043))
- Log `OPTIONS` requests at `DEBUG` rather than `INFO` level to reduce amount logged at `INFO`. ([\#8049](https://github.com/matrix-org/synapse/issues/8049))
- Reduce amount of outbound request logging at `INFO` level. ([\#8050](https://github.com/matrix-org/synapse/issues/8050))
- It is no longer necessary to explicitly define `filters` in the logging configuration. (Continuing to do so is redundant but harmless.) ([\#8051](https://github.com/matrix-org/synapse/issues/8051))
- Add and improve type hints. ([\#8058](https://github.com/matrix-org/synapse/issues/8058), [\#8064](https://github.com/matrix-org/synapse/issues/8064), [\#8060](https://github.com/matrix-org/synapse/issues/8060), [\#8067](https://github.com/matrix-org/synapse/issues/8067))


Synapse 1.18.0 (2020-07-30)
===========================

Deprecation Warnings
--------------------

### Docker Tags with `-py3` Suffix

From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.

On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.

Scripts relying on the `-py3` suffix will need to be updated.
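
By way of example (a sketch; only the image name and dates come from the announcement above), a deployment script would switch tags like so:

```sh
# Before (tag removed on 10th August 2020):
docker pull matrixdotorg/synapse:latest-py3

# After (the unsuffixed tag has been the same image since 0.99.0):
docker pull matrixdotorg/synapse:latest
```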

### TCP-based Replication

When setting up worker processes, we now recommend the use of a Redis server for replication. The old direct TCP connection method is deprecated and will be removed in a future release. See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.18.0/docs/workers.md) for more details.
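
A minimal sketch of the corresponding `homeserver.yaml` stanza (the host and port are assumptions for a local Redis; docs/workers.md is the authoritative reference):

```yaml
# Use Redis for replication between the main process and workers
redis:
  enabled: true
  host: localhost
  port: 6379
```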

Improved Documentation
----------------------

- Update worker docs with latest enhancements. ([\#7969](https://github.com/matrix-org/synapse/issues/7969))


Synapse 1.18.0rc2 (2020-07-28)
==============================

Bugfixes
--------

- Fix an `AssertionError` exception introduced in v1.18.0rc1. ([\#7876](https://github.com/matrix-org/synapse/issues/7876))
- Fix experimental support for moving typing off master when worker is restarted, which is broken in v1.18.0rc1. ([\#7967](https://github.com/matrix-org/synapse/issues/7967))


Internal Changes
----------------

- Further optimise queueing of inbound replication commands. ([\#7876](https://github.com/matrix-org/synapse/issues/7876))


Synapse 1.18.0rc1 (2020-07-27)
==============================

Features
--------

- Include room states on invite events that are sent to application services. Contributed by @Sorunome. ([\#6455](https://github.com/matrix-org/synapse/issues/6455))
- Add delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel. ([\#7613](https://github.com/matrix-org/synapse/issues/7613), [\#7953](https://github.com/matrix-org/synapse/issues/7953))
- Add experimental support for running multiple federation sender processes. ([\#7798](https://github.com/matrix-org/synapse/issues/7798))
- Add the option to validate the `iss` and `aud` claims for JWT logins. ([\#7827](https://github.com/matrix-org/synapse/issues/7827))
- Add support for handling registration requests across multiple client reader workers. ([\#7830](https://github.com/matrix-org/synapse/issues/7830))
- Add an admin API to list the users in a room. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#7842](https://github.com/matrix-org/synapse/issues/7842))
- Allow email subjects to be customised through Synapse's configuration. ([\#7846](https://github.com/matrix-org/synapse/issues/7846))
- Add the ability to re-activate an account from the admin API. ([\#7847](https://github.com/matrix-org/synapse/issues/7847), [\#7908](https://github.com/matrix-org/synapse/issues/7908))
- Add experimental support for running multiple pusher workers. ([\#7855](https://github.com/matrix-org/synapse/issues/7855))
- Add experimental support for moving typing off master. ([\#7869](https://github.com/matrix-org/synapse/issues/7869), [\#7959](https://github.com/matrix-org/synapse/issues/7959))
- Report CPU metrics to prometheus for time spent processing replication commands. ([\#7879](https://github.com/matrix-org/synapse/issues/7879))
- Support oEmbed for media previews. ([\#7920](https://github.com/matrix-org/synapse/issues/7920))
- Abort federation requests where the client disconnects before the ratelimiter expires. ([\#7930](https://github.com/matrix-org/synapse/issues/7930))
- Cache responses to `/_matrix/federation/v1/state_ids` to reduce duplicated work. ([\#7931](https://github.com/matrix-org/synapse/issues/7931))


Bugfixes
--------

- Fix detection of out of sync remote device lists when receiving events from remote users. ([\#7815](https://github.com/matrix-org/synapse/issues/7815))
- Fix bug where Synapse fails to process an incoming event over federation if the server is missing too much of the event's auth chain. ([\#7817](https://github.com/matrix-org/synapse/issues/7817))
- Fix a bug causing Synapse to misinterpret the value `off` for `encryption_enabled_by_default_for_room_type` in its configuration file(s) if that value isn't surrounded by quotes. This bug was introduced in v1.16.0. ([\#7822](https://github.com/matrix-org/synapse/issues/7822))
- Fix bug where we did not always pass in `app_name` or `server_name` to email templates, including e.g. for registration emails. ([\#7829](https://github.com/matrix-org/synapse/issues/7829))
- Errors which occur while using the non-standard JWT login now return the proper error: `403 Forbidden` with an error code of `M_FORBIDDEN`. ([\#7844](https://github.com/matrix-org/synapse/issues/7844))
- Fix "AttributeError: 'str' object has no attribute 'get'" error message when applying per-room message retention policies. The bug was introduced in Synapse 1.7.0. ([\#7850](https://github.com/matrix-org/synapse/issues/7850))
- Fix a bug introduced in Synapse 1.10.0 which could cause a "no create event in auth events" error during room creation. ([\#7854](https://github.com/matrix-org/synapse/issues/7854))
- Fix a bug which allowed empty rooms to be rejoined over federation. ([\#7859](https://github.com/matrix-org/synapse/issues/7859))
- Fix 'Unable to find a suitable guest user ID' error when using multiple client_reader workers. ([\#7866](https://github.com/matrix-org/synapse/issues/7866))
- Fix a long standing bug where the tracing of async functions with opentracing was broken. ([\#7872](https://github.com/matrix-org/synapse/issues/7872), [\#7961](https://github.com/matrix-org/synapse/issues/7961))
- Fix "TypeError in `synapse.notifier`" exceptions. ([\#7880](https://github.com/matrix-org/synapse/issues/7880))
- Fix deprecation warning due to invalid escape sequences. ([\#7895](https://github.com/matrix-org/synapse/issues/7895))


Updates to the Docker image
---------------------------

- Base docker image on Debian Buster rather than Alpine Linux. Contributed by @maquis196. ([\#7839](https://github.com/matrix-org/synapse/issues/7839))


Improved Documentation
----------------------

- Provide instructions on using `register_new_matrix_user` via docker (see the example after this list). ([\#7885](https://github.com/matrix-org/synapse/issues/7885))
- Change the sample config postgres user section to use `synapse_user` instead of `synapse` to align with the documentation. ([\#7889](https://github.com/matrix-org/synapse/issues/7889))
- Reorder database paragraphs to promote postgres over sqlite. ([\#7933](https://github.com/matrix-org/synapse/issues/7933))
- Update the dates of ACME v1's end of life in [`ACME.md`](https://github.com/matrix-org/synapse/blob/master/docs/ACME.md). ([\#7934](https://github.com/matrix-org/synapse/issues/7934))
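
For example (the container name, port and config path are assumptions about a typical deployment):

```sh
# Register a new (admin) user from inside a running Synapse container
docker exec -it synapse register_new_matrix_user \
    http://localhost:8008 -c /data/homeserver.yaml -a
```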

Deprecations and Removals
-------------------------

- Remove unused `synapse_replication_tcp_resource_invalidate_cache` prometheus metric. ([\#7878](https://github.com/matrix-org/synapse/issues/7878))
- Remove Ubuntu Eoan from the list of `.deb` packages that we build as it is now end-of-life. Contributed by @gary-kim. ([\#7888](https://github.com/matrix-org/synapse/issues/7888))


Internal Changes
----------------

- Switch parts of the codebase from `simplejson` to the standard library `json`. ([\#7802](https://github.com/matrix-org/synapse/issues/7802))
- Add type hints to the http server code and remove an unused parameter. ([\#7813](https://github.com/matrix-org/synapse/issues/7813))
- Add type hints to synapse.api.errors module. ([\#7820](https://github.com/matrix-org/synapse/issues/7820))
- Ensure that calls to `json.dumps` are compatible with the standard library json. ([\#7836](https://github.com/matrix-org/synapse/issues/7836))
- Remove redundant `retry_on_integrity_error` wrapper for event persistence code. ([\#7848](https://github.com/matrix-org/synapse/issues/7848))
- Consistently use `db_to_json` to convert from database values to JSON objects. ([\#7849](https://github.com/matrix-org/synapse/issues/7849))
- Convert various parts of the codebase to async/await. ([\#7851](https://github.com/matrix-org/synapse/issues/7851), [\#7860](https://github.com/matrix-org/synapse/issues/7860), [\#7868](https://github.com/matrix-org/synapse/issues/7868), [\#7871](https://github.com/matrix-org/synapse/issues/7871), [\#7873](https://github.com/matrix-org/synapse/issues/7873), [\#7874](https://github.com/matrix-org/synapse/issues/7874), [\#7884](https://github.com/matrix-org/synapse/issues/7884), [\#7912](https://github.com/matrix-org/synapse/issues/7912), [\#7935](https://github.com/matrix-org/synapse/issues/7935), [\#7939](https://github.com/matrix-org/synapse/issues/7939), [\#7942](https://github.com/matrix-org/synapse/issues/7942), [\#7944](https://github.com/matrix-org/synapse/issues/7944))
- Add support for handling registration requests across multiple client reader workers. ([\#7853](https://github.com/matrix-org/synapse/issues/7853))
- Small performance improvement in typing processing. ([\#7856](https://github.com/matrix-org/synapse/issues/7856))
- The default value of `filter_timeline_limit` was changed from -1 (no limit) to 100. ([\#7858](https://github.com/matrix-org/synapse/issues/7858))
- Optimise queueing of inbound replication commands. ([\#7861](https://github.com/matrix-org/synapse/issues/7861))
- Add some type annotations to `HomeServer` and `BaseHandler`. ([\#7870](https://github.com/matrix-org/synapse/issues/7870))
- Clean up `PreserveLoggingContext`. ([\#7877](https://github.com/matrix-org/synapse/issues/7877))
- Change "unknown room version" logging from 'error' to 'warning'. ([\#7881](https://github.com/matrix-org/synapse/issues/7881))
- Stop using `device_max_stream_id` table and just use `device_inbox.stream_id`. ([\#7882](https://github.com/matrix-org/synapse/issues/7882))
- Return an empty body for OPTIONS requests. ([\#7886](https://github.com/matrix-org/synapse/issues/7886))
- Fix typo in generated config file. Contributed by @ThiefMaster. ([\#7890](https://github.com/matrix-org/synapse/issues/7890))
- Import ABC from `collections.abc` for Python 3.10 compatibility. ([\#7892](https://github.com/matrix-org/synapse/issues/7892))
- Remove unused functions `time_function`, `trace_function`, `get_previous_frames`
  and `get_previous_frame` from `synapse.logging.utils` module. ([\#7897](https://github.com/matrix-org/synapse/issues/7897))
- Lint the `contrib/` directory in CI and linting scripts, add `synctl` to the linting script for consistency with CI. ([\#7914](https://github.com/matrix-org/synapse/issues/7914))
- Use Element CSS and logo in notification emails when app name is Element. ([\#7919](https://github.com/matrix-org/synapse/issues/7919))
- Optimisation to /sync handling: skip serializing the response if the client has already disconnected. ([\#7927](https://github.com/matrix-org/synapse/issues/7927))
- When a client disconnects, don't log it as 'Error processing request'. ([\#7928](https://github.com/matrix-org/synapse/issues/7928))
- Add debugging to `/sync` response generation (disabled by default). ([\#7929](https://github.com/matrix-org/synapse/issues/7929))
- Update comments that refer to Deferreds for async functions. ([\#7945](https://github.com/matrix-org/synapse/issues/7945))
- Simplify error handling in federation handler. ([\#7950](https://github.com/matrix-org/synapse/issues/7950))


Synapse 1.17.0 (2020-07-13)
===========================

Synapse 1.17.0 is identical to 1.17.0rc1, with the addition of the fix that was included in 1.16.1.


Synapse 1.16.1 (2020-07-10)
===========================

In some distributions of Synapse 1.16.0, we incorrectly included a database migration which added a new, unused table. This release removes the redundant table.

Bugfixes
--------

- Drop table `local_rejections_stream` which was incorrectly added in Synapse 1.16.0. ([\#7816](https://github.com/matrix-org/synapse/issues/7816), [b1beb3ff5](https://github.com/matrix-org/synapse/commit/b1beb3ff5))


Synapse 1.17.0rc1 (2020-07-09)
==============================

Bugfixes
--------

- Fix inconsistent handling of upper and lower case in email addresses when used as identifiers for login, etc. Contributed by @dklimpel. ([\#7021](https://github.com/matrix-org/synapse/issues/7021))
- Fix "Tried to close a non-active scope!" error messages when opentracing is enabled. ([\#7732](https://github.com/matrix-org/synapse/issues/7732))
- Fix incorrect error message when database CTYPE was set incorrectly. ([\#7760](https://github.com/matrix-org/synapse/issues/7760))
- Fix to not ignore `set_tweak` actions in Push Rules that have no `value`, as permitted by the specification. ([\#7766](https://github.com/matrix-org/synapse/issues/7766))
- Fix synctl to handle empty config files correctly. Contributed by @kotovalexarian. ([\#7779](https://github.com/matrix-org/synapse/issues/7779))
- Fix a long-standing bug in worker mode where worker information was saved in the devices table instead of the original IP address and user agent. ([\#7797](https://github.com/matrix-org/synapse/issues/7797))
- Fix 'stuck invites' which happen when we are unable to reject a room invite received over federation. ([\#7804](https://github.com/matrix-org/synapse/issues/7804), [\#7809](https://github.com/matrix-org/synapse/issues/7809), [\#7810](https://github.com/matrix-org/synapse/issues/7810))


Updates to the Docker image
---------------------------

- Include libwebp in the Docker file to properly handle webp image uploads. ([\#7791](https://github.com/matrix-org/synapse/issues/7791))


Improved Documentation
----------------------

- Improve the documentation of the non-standard JSON web token login type. ([\#7776](https://github.com/matrix-org/synapse/issues/7776))
- Update doc links for caddy. Contributed by Nicolai Søborg. ([\#7789](https://github.com/matrix-org/synapse/issues/7789))


Internal Changes
----------------

- Refactor getting replication updates from database. ([\#7740](https://github.com/matrix-org/synapse/issues/7740))
- Send push notifications with a high or low priority depending upon whether they may generate user-observable effects. ([\#7765](https://github.com/matrix-org/synapse/issues/7765))
- Use symbolic names for replication stream names. ([\#7768](https://github.com/matrix-org/synapse/issues/7768))
- Add early returns to `_check_for_soft_fail`. ([\#7769](https://github.com/matrix-org/synapse/issues/7769))
- Fix up `synapse.handlers.federation` to pass mypy. ([\#7770](https://github.com/matrix-org/synapse/issues/7770))
- Convert the appserver handler to async/await. ([\#7775](https://github.com/matrix-org/synapse/issues/7775))
- Allow use of higher versions of prometheus_client (<0.9.0), which are expected to introduce no breaking changes. Contributed by Oliver Kurz. ([\#7780](https://github.com/matrix-org/synapse/issues/7780))
- Update linting scripts and codebase to be compatible with `isort` v5. ([\#7786](https://github.com/matrix-org/synapse/issues/7786))
- Stop populating unused table `local_invites`. ([\#7793](https://github.com/matrix-org/synapse/issues/7793))
- Ensure that strings (not bytes) are passed into JSON serialization. ([\#7799](https://github.com/matrix-org/synapse/issues/7799))
- Switch from simplejson to the standard library json. ([\#7800](https://github.com/matrix-org/synapse/issues/7800))
- Add `signing_key` property to `HomeServer` to save code duplication. ([\#7805](https://github.com/matrix-org/synapse/issues/7805))
- Improve stacktraces from exceptions in background processes. ([\#7808](https://github.com/matrix-org/synapse/issues/7808))
- Fix various spelling errors in comments and log lines. ([\#7811](https://github.com/matrix-org/synapse/issues/7811))


Synapse 1.16.0 (2020-07-08)
===========================

No significant changes since 1.16.0rc2.

Note that this release deprecates the `m.login.jwt` login method, renaming it
to `org.matrix.login.jwt`, as `m.login.jwt` is not part of the Matrix spec.
Otherwise the behaviour is identical. Synapse will accept both names for now,
but this may change in a future release.
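
Illustratively (the token value is a placeholder), a login request using the renamed method looks like:

```
POST /_matrix/client/r0/login

{
    "type": "org.matrix.login.jwt",
    "token": "<jwt>"
}
```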

Synapse 1.16.0rc2 (2020-07-02)
==============================

Synapse 1.16.0rc2 includes the security fixes released with Synapse 1.15.2.
Please see [below](#synapse-1152-2020-07-02) for more details.

Improved Documentation
----------------------

- Update postgres image in example `docker-compose.yaml` to tag `12-alpine`. ([\#7696](https://github.com/matrix-org/synapse/issues/7696))


Internal Changes
----------------

- Add some metrics for inbound and outbound federation latencies: `synapse_federation_server_pdu_process_time` and `synapse_event_processing_lag_by_event`. ([\#7771](https://github.com/matrix-org/synapse/issues/7771))


Synapse 1.15.2 (2020-07-02)
===========================

Due to the two security issues highlighted below, server administrators are
encouraged to update Synapse. We are not aware of these vulnerabilities being
exploited in the wild.

Security advisory
-----------------

* A malicious homeserver could force Synapse to reset the state in a room to a
  small subset of the correct state. This affects all Synapse deployments which
  federate with untrusted servers. ([96e9afe6](https://github.com/matrix-org/synapse/commit/96e9afe62500310977dc3cbc99a8d16d3d2fa15c))
* HTML pages served via Synapse were vulnerable to clickjacking attacks. This
  predominantly affects homeservers with single-sign-on enabled, but all server
  administrators are encouraged to upgrade. ([ea26e9a9](https://github.com/matrix-org/synapse/commit/ea26e9a98b0541fc886a1cb826a38352b7599dbe))

This was reported by [Quentin Gliech](https://sandhose.fr/).


Synapse 1.16.0rc1 (2020-07-01)
==============================

Features
--------

- Add an option to enable encryption by default for new rooms (see the example after this list). ([\#7639](https://github.com/matrix-org/synapse/issues/7639))
- Add support for running multiple media repository workers. See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.16.0/docs/workers.md) for instructions. ([\#7706](https://github.com/matrix-org/synapse/issues/7706))
- Media can now be marked as safe from being quarantined. ([\#7718](https://github.com/matrix-org/synapse/issues/7718))
- Expand the configuration options for auto-join rooms. ([\#7763](https://github.com/matrix-org/synapse/issues/7763))
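
A sketch of the corresponding `homeserver.yaml` option (the option name also appears in the 1.18.0rc1 bugfix notes above; `invite`, `all` and `off` are its accepted values):

```yaml
# Encrypt newly created invite-only (private) rooms by default
encryption_enabled_by_default_for_room_type: invite
```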

Bugfixes
--------

- Remove `user_id` from the response to `GET /_matrix/client/r0/presence/{userId}/status` to match the specification. ([\#7606](https://github.com/matrix-org/synapse/issues/7606))
- In worker mode, ensure that replicated data has not already been received. ([\#7648](https://github.com/matrix-org/synapse/issues/7648))
- Fix intermittent exception during startup, introduced in Synapse 1.14.0. ([\#7663](https://github.com/matrix-org/synapse/issues/7663))
- Include a user-agent for federation and well-known requests. ([\#7677](https://github.com/matrix-org/synapse/issues/7677))
- Accept the proper field (`phone`) for the `m.id.phone` identifier type. The legacy field of `number` is still accepted as a fallback. Bug introduced in v0.20.0. ([\#7687](https://github.com/matrix-org/synapse/issues/7687))
- Fix "Starting db txn 'get_completed_ui_auth_stages' from sentinel context" warning. The bug was introduced in 1.13.0. ([\#7688](https://github.com/matrix-org/synapse/issues/7688))
- Compare the URI and method during user interactive authentication (instead of the URI twice). Bug introduced in 1.13.0. ([\#7689](https://github.com/matrix-org/synapse/issues/7689))
- Fix a long standing bug where the response to the `GET room_keys/version` endpoint had the incorrect type for the `etag` field. ([\#7691](https://github.com/matrix-org/synapse/issues/7691))
- Fix logged error during device resync in opentracing. Broke in v1.14.0. ([\#7698](https://github.com/matrix-org/synapse/issues/7698))
- Do not break push rule evaluation when receiving an event with a non-string body. This is a long-standing bug. ([\#7701](https://github.com/matrix-org/synapse/issues/7701))
- Fix a long-standing bug which resulted in an exception: "TypeError: argument of type 'ObservableDeferred' is not iterable". ([\#7708](https://github.com/matrix-org/synapse/issues/7708))
- The `synapse_port_db` script no longer fails when the `ui_auth_sessions` table is non-empty. This bug has existed since v1.13.0. ([\#7711](https://github.com/matrix-org/synapse/issues/7711))
- Synapse will now fetch media from the proper specified URL (using the r0 prefix instead of the unspecified v1). ([\#7714](https://github.com/matrix-org/synapse/issues/7714))
- Fix the tables ignored by `synapse_port_db` to be in sync with the current database schema. ([\#7717](https://github.com/matrix-org/synapse/issues/7717))
- Fix missing `Content-Length` on HTTP responses from the metrics handler. ([\#7730](https://github.com/matrix-org/synapse/issues/7730))
- Fix large state resolutions from stalling Synapse for seconds at a time. ([\#7735](https://github.com/matrix-org/synapse/issues/7735), [\#7746](https://github.com/matrix-org/synapse/issues/7746))


Improved Documentation
----------------------

- Spelling correction in sample_config.yaml. ([\#7652](https://github.com/matrix-org/synapse/issues/7652))
- Added instructions for how to use Keycloak via OpenID Connect to authenticate with Synapse. ([\#7659](https://github.com/matrix-org/synapse/issues/7659))
- Corrected misspelling of PostgreSQL. ([\#7724](https://github.com/matrix-org/synapse/issues/7724))


Deprecations and Removals
-------------------------

- Deprecate `m.login.jwt` login method in favour of `org.matrix.login.jwt`, as `m.login.jwt` is not part of the Matrix spec. ([\#7675](https://github.com/matrix-org/synapse/issues/7675))


Internal Changes
----------------

- Refactor getting replication updates from database. ([\#7636](https://github.com/matrix-org/synapse/issues/7636))
- Clean-up the login fallback code. ([\#7657](https://github.com/matrix-org/synapse/issues/7657))
- Increase the default SAML session expiry time to 15 minutes. ([\#7664](https://github.com/matrix-org/synapse/issues/7664))
- Convert the device message and pagination handlers to async/await. ([\#7678](https://github.com/matrix-org/synapse/issues/7678))
- Convert typing handler to async/await. ([\#7679](https://github.com/matrix-org/synapse/issues/7679))
- Require `parameterized` package version to be at least 0.7.0. ([\#7680](https://github.com/matrix-org/synapse/issues/7680))
- Refactor handling of `listeners` configuration settings. ([\#7681](https://github.com/matrix-org/synapse/issues/7681))
- Replace uses of `six.iterkeys`/`iteritems`/`itervalues` with `keys()`/`items()`/`values()`. ([\#7692](https://github.com/matrix-org/synapse/issues/7692))
- Add support for using `rust-python-jaeger-reporter` library to reduce jaeger tracing overhead. ([\#7697](https://github.com/matrix-org/synapse/issues/7697))
- Make Tox actions work on Debian 10. ([\#7703](https://github.com/matrix-org/synapse/issues/7703))
- Replace all remaining uses of `six` with native Python 3 equivalents. Contributed by @ilmari. ([\#7704](https://github.com/matrix-org/synapse/issues/7704))
- Fix broken link in sample config. ([\#7712](https://github.com/matrix-org/synapse/issues/7712))
- Speed up state res v2 across large state differences. ([\#7725](https://github.com/matrix-org/synapse/issues/7725))
- Convert directory handler to async/await. ([\#7727](https://github.com/matrix-org/synapse/issues/7727))
- Move `flake8` to the end of `scripts-dev/lint.sh` as it takes the longest and could cause the script to exit early. ([\#7738](https://github.com/matrix-org/synapse/issues/7738))
- Explain that the "test" conditional dependency requirement does not include all of the modules necessary to run the unit tests. ([\#7751](https://github.com/matrix-org/synapse/issues/7751))
- Add some metrics for inbound and outbound federation latencies: `synapse_federation_server_pdu_process_time` and `synapse_event_processing_lag_by_event`. ([\#7755](https://github.com/matrix-org/synapse/issues/7755))


Synapse 1.15.1 (2020-06-16)
===========================

Bugfixes
--------

- Fix a bug introduced in v1.15.0 that would crash Synapse on start when using certain password auth providers. ([\#7684](https://github.com/matrix-org/synapse/issues/7684))
- Fix a bug introduced in v1.15.0 which meant that some 3PID management endpoints were not accessible on the correct URL. ([\#7685](https://github.com/matrix-org/synapse/issues/7685))


Synapse 1.15.0 (2020-06-11)
===========================

No significant changes.


Synapse 1.15.0rc1 (2020-06-09)
==============================

Features
--------

- Advertise support for Client-Server API r0.6.0 and remove related unstable feature flags. ([\#6585](https://github.com/matrix-org/synapse/issues/6585))
- Add an option to disable autojoining rooms for guest accounts. ([\#6637](https://github.com/matrix-org/synapse/issues/6637))
- For SAML authentication, add the ability to pass email addresses to be added to new users' accounts via SAML attributes. Contributed by Christopher Cooper. ([\#7385](https://github.com/matrix-org/synapse/issues/7385))
- Add admin APIs to allow server admins to manage users' devices. Contributed by @dklimpel. ([\#7481](https://github.com/matrix-org/synapse/issues/7481))
- Add support for generating thumbnails for WebP images. Previously, users would see an empty box instead of preview image. Contributed by @WGH-. ([\#7586](https://github.com/matrix-org/synapse/issues/7586))
- Support the standardized `m.login.sso` user-interactive authentication flow. ([\#7630](https://github.com/matrix-org/synapse/issues/7630))


Bugfixes
--------

- Allow new users to be registered via the admin API even if the monthly active user limit has been reached. Contributed by @dklimpel. ([\#7263](https://github.com/matrix-org/synapse/issues/7263))
- Fix email notifications not being enabled for new users when created via the Admin API. ([\#7267](https://github.com/matrix-org/synapse/issues/7267))
- Fix str placeholders in an instance of `PrepareDatabaseException`. Introduced in Synapse v1.8.0. ([\#7575](https://github.com/matrix-org/synapse/issues/7575))
- Fix a bug in automatic user creation during first time login with `m.login.jwt`. Regression in v1.6.0. Contributed by @olof. ([\#7585](https://github.com/matrix-org/synapse/issues/7585))
- Fix a bug causing the cross-signing keys to be ignored when resyncing a device list. ([\#7594](https://github.com/matrix-org/synapse/issues/7594))
- Fix metrics failing when there is a large number of active background processes. ([\#7597](https://github.com/matrix-org/synapse/issues/7597))
- Fix bug where returning rooms for a group would fail if it included a room that the server was not in. ([\#7599](https://github.com/matrix-org/synapse/issues/7599))
- Fix duplicate key violation when persisting read markers. ([\#7607](https://github.com/matrix-org/synapse/issues/7607))
- Prevent an entire iteration of the device list resync loop from failing if one server responds with a malformed result. ([\#7609](https://github.com/matrix-org/synapse/issues/7609))
- Fix exceptions when fetching events from a remote host fails. ([\#7622](https://github.com/matrix-org/synapse/issues/7622))
- Make `synctl restart` start Synapse if it wasn't running. ([\#7624](https://github.com/matrix-org/synapse/issues/7624))
- Pass device information through to the login endpoint when using the login fallback. ([\#7629](https://github.com/matrix-org/synapse/issues/7629))
- Advertise the `m.login.token` login flow when OpenID Connect is enabled. ([\#7631](https://github.com/matrix-org/synapse/issues/7631))
- Fix bug in account data replication stream. ([\#7656](https://github.com/matrix-org/synapse/issues/7656))


Improved Documentation
----------------------

- Update the OpenBSD installation instructions. ([\#7587](https://github.com/matrix-org/synapse/issues/7587))
- Advertise Python 3.8 support in `setup.py`. ([\#7602](https://github.com/matrix-org/synapse/issues/7602))
- Add a link to `#synapse:matrix.org` in the troubleshooting section of the README. ([\#7603](https://github.com/matrix-org/synapse/issues/7603))
- Clarifications to the admin api documentation. ([\#7647](https://github.com/matrix-org/synapse/issues/7647))


Internal Changes
----------------

- Convert the identity handler to async/await. ([\#7561](https://github.com/matrix-org/synapse/issues/7561))
- Improve query performance for fetching state from a PostgreSQL database. Contributed by @ilmari. ([\#7567](https://github.com/matrix-org/synapse/issues/7567))
- Speed up processing of federation stream RDATA rows. ([\#7584](https://github.com/matrix-org/synapse/issues/7584))
- Add comment to systemd example to show postgresql dependency. ([\#7591](https://github.com/matrix-org/synapse/issues/7591))
- Refactor `Ratelimiter` to limit the amount of expensive config value accesses. ([\#7595](https://github.com/matrix-org/synapse/issues/7595))
- Convert groups handlers to async/await. ([\#7600](https://github.com/matrix-org/synapse/issues/7600))
- Clean up exception handling in `SAML2ResponseResource`. ([\#7614](https://github.com/matrix-org/synapse/issues/7614))
- Check that all asynchronous tasks succeed and general cleanup of `MonthlyActiveUsersTestCase` and `TestMauLimit`. ([\#7619](https://github.com/matrix-org/synapse/issues/7619))
- Convert `get_user_id_by_threepid` to async/await. ([\#7620](https://github.com/matrix-org/synapse/issues/7620))
- Switch to upstream `dh-virtualenv` rather than our fork for Debian package builds. ([\#7621](https://github.com/matrix-org/synapse/issues/7621))
- Update CI scripts to check the number in the newsfile fragment. ([\#7623](https://github.com/matrix-org/synapse/issues/7623))
- Check if the localpart of a Matrix ID is reserved for guest users earlier in the registration flow, as well as when responding to requests to `/register/available`. ([\#7625](https://github.com/matrix-org/synapse/issues/7625))
- Minor cleanups to OpenID Connect integration. ([\#7628](https://github.com/matrix-org/synapse/issues/7628))
- Attempt to fix flaky test: `PhoneHomeStatsTestCase.test_performance_100`. ([\#7634](https://github.com/matrix-org/synapse/issues/7634))
- Fix typos of `m.olm.curve25519-aes-sha2` and `m.megolm.v1.aes-sha2` in comments, test files. ([\#7637](https://github.com/matrix-org/synapse/issues/7637))
- Convert user directory, state deltas, and stats handlers to async/await. ([\#7640](https://github.com/matrix-org/synapse/issues/7640))
- Remove some unused constants. ([\#7644](https://github.com/matrix-org/synapse/issues/7644))
- Fix type information on `assert_*_is_admin` methods. ([\#7645](https://github.com/matrix-org/synapse/issues/7645))
- Convert registration handler to async/await. ([\#7649](https://github.com/matrix-org/synapse/issues/7649))


Synapse 1.14.0 (2020-05-28)
===========================
```diff
@@ -53,7 +539,7 @@ Bugfixes
 - Fix bug where a local user leaving a room could fail under rare circumstances. ([\#7548](https://github.com/matrix-org/synapse/issues/7548))
 - Fix "Missing RelayState parameter" error when using user interactive authentication with SAML for some SAML providers. ([\#7552](https://github.com/matrix-org/synapse/issues/7552))
 - Fix exception `'GenericWorkerReplicationHandler' object has no attribute 'send_federation_ack'`, introduced in v1.13.0. ([\#7564](https://github.com/matrix-org/synapse/issues/7564))
-- `synctl` now warns if it was unable to stop Synapse and will not attempt to start Synapse if nothing was stopped. Contributed by Romain Bouyé. ([\#6590](https://github.com/matrix-org/synapse/issues/6590))
+- `synctl` now warns if it was unable to stop Synapse and will not attempt to start Synapse if nothing was stopped. Contributed by Romain Bouyé. ([\#6598](https://github.com/matrix-org/synapse/issues/6598))


 Updates to the Docker image
```
`INSTALL.md` (183 changes)
```diff
@@ -1,10 +1,12 @@
 - [Choosing your server name](#choosing-your-server-name)
+- [Picking a database engine](#picking-a-database-engine)
 - [Installing Synapse](#installing-synapse)
 - [Installing from source](#installing-from-source)
 - [Platform-Specific Instructions](#platform-specific-instructions)
 - [Prebuilt packages](#prebuilt-packages)
 - [Setting up Synapse](#setting-up-synapse)
 - [TLS certificates](#tls-certificates)
+- [Client Well-Known URI](#client-well-known-uri)
 - [Email](#email)
 - [Registering a user](#registering-a-user)
 - [Setting up a TURN server](#setting-up-a-turn-server)
```
@@ -27,6 +29,25 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).

# Picking a database engine

Synapse offers two database engines:

* [PostgreSQL](https://www.postgresql.org)
* [SQLite](https://sqlite.org/)

Almost all installations should opt to use PostgreSQL. Advantages include:

* significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
* allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL, please see
[docs/postgres.md](docs/postgres.md)

By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.
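
As a concrete sketch (the credentials and host are placeholders; docs/postgres.md is the authoritative reference), the PostgreSQL form of the `database` section in `homeserver.yaml` looks like:

```yaml
database:
  name: psycopg2           # PostgreSQL driver (the default is sqlite3)
  args:
    user: synapse_user
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5              # connection-pool bounds
    cp_max: 10
```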

# Installing Synapse

## Installing from source

````diff
@@ -180,35 +201,41 @@ sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \

 #### OpenBSD

-Installing prerequisites on OpenBSD:
+A port of Synapse is available under `net/synapse`. The filesystem
+underlying the homeserver directory (defaults to `/var/synapse`) has to be
+mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
+and mounting it to `/var/synapse` should be taken into consideration.
+
+To be able to build Synapse's dependency on python the `WRKOBJDIR`
+(cf. `bsd.port.mk(5)`) for building python, too, needs to be on a filesystem
+mounted with `wxallowed` (cf. `mount(8)`).
+
+Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
+default OpenBSD installation is mounted with `wxallowed`):

 ```
-doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
-    libxslt jpeg
+doas mkdir /usr/local/pobj_wxallowed
 ```

-There is currently no port for OpenBSD. Additionally, OpenBSD's security
-settings require a slightly more difficult installation process.
+Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
+configured in `/etc/mk.conf`:

-(XXX: I suspect this is out of date)
+```
+doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
+```

-1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
-   new user called `_synapse` and set that directory as the new user's home.
-   This is required because, by default, OpenBSD only allows binaries which need
-   write and execute permissions on the same memory space to be run from
-   `/usr/local`.
-2. `su` to the new `_synapse` user and change to their home directory.
-3. Create a new virtualenv: `virtualenv -p python3 ~/.synapse`
-4. Source the virtualenv configuration located at
-   `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
-   using the `.` command, rather than `bash`'s `source`.
-5. Optionally, use `pip` to install `lxml`, which Synapse needs to parse
-   webpages for their titles.
-6. Use `pip` to install this repository: `pip install matrix-synapse`
-7. Optionally, change `_synapse`'s shell to `/bin/false` to reduce the
-   chance of a compromised Synapse server being used to take over your box.
+Setting the `WRKOBJDIR` for building python:

-After this, you may proceed with the rest of the install directions.
+```
+echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
+```
+
+Building Synapse:
+
+```
+cd /usr/ports/net/synapse
+make install
+```

 #### Windows
````
@@ -228,9 +255,9 @@ for a number of platforms.

There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
@@ -238,7 +265,8 @@ https://hub.docker.com/r/avhost/docker-matrix/tags/

Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
https://github.com/spantaleev/matrix-docker-ansible-deploy

@@ -271,22 +299,27 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

#### Downstream Debian/Ubuntu packages
#### Downstream Debian packages

For `buster` and `sid`, Synapse is available in the Debian repositories and
it should be possible to install it simply with:
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.

If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:

```
sudo apt install matrix-synapse
```

There is also a version of `matrix-synapse` in `stretch-backports`. Please see
the [Debian documentation on
backports](https://backports.debian.org/Instructions/) for information on how
to use them.
#### Downstream Ubuntu packages

We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

### Fedora

@@ -350,6 +383,18 @@ Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Mo
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`

### OpenBSD

As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.

Installing Synapse:

```
doas pkg_add synapse
```

### NixOS

@@ -387,13 +432,11 @@ so, you will need to edit `homeserver.yaml`, as follows:
```

* You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You can either
  point these settings at an existing certificate and key, or you can
  enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
  for having Synapse automatically provision and renew federation
  certificates through ACME can be found at [ACME.md](docs/ACME.md).
  Note that, as pointed out in that document, this feature will not
  work with installs set up after November 2019.
  `tls_private_key_path` lines under the `TLS` section. You will need to manage
  provisioning of these certificates yourself — Synapse had built-in ACME
  support, but the ACMEv1 protocol Synapse implements is deprecated, not
  allowed by LetsEncrypt for new sites, and will break for existing sites in
  late 2020. See [ACME.md](docs/ACME.md).

If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
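Since you now manage provisioning yourself, one common approach is certbot (an illustration, not part of these instructions; the tool, domain, and paths are assumptions):

```
# Obtain a certificate using certbot's standalone verifier
# (assumes port 80 is free while it runs).
sudo certbot certonly --standalone -d matrix.example.com

# Then point homeserver.yaml at the results:
#   tls_certificate_path: "/etc/letsencrypt/live/matrix.example.com/fullchain.pem"
#   tls_private_key_path: "/etc/letsencrypt/live/matrix.example.com/privkey.pem"
```
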
@@ -403,6 +446,60 @@ so, you will need to edit `homeserver.yaml`, as follows:
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).

## Client Well-Known URI

Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.

The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.

```
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  }
}
```

It can optionally contain identity server information as well.

```
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  },
  "m.identity_server": {
    "base_url": "https://<identity.example.com>"
  }
}
```

To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.

In nginx this would be something like:
```
location /.well-known/matrix/client {
    return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
    add_header Content-Type application/json;
    add_header Access-Control-Allow-Origin *;
}
```

You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.

```
public_baseurl: "https://<matrix.example.com>"
```
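Once deployed, a quick sanity check (an illustrative command, not part of the original instructions) is to fetch the file and confirm the CORS header is present:

```
curl -i https://example.com/.well-known/matrix/client
# Expect an HTTP 200 with Content-Type: application/json,
# an Access-Control-Allow-Origin: * header, and the JSON body shown above.
```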

## Email

@@ -421,7 +518,7 @@ email will be disabled.

## Registering a user

The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively you can do so from the command line if you have installed via pip.


50 README.rst
@@ -45,7 +45,7 @@ which handle:
- Eventually-consistent cryptographically secure synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption[1]
  end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
@@ -82,9 +82,6 @@ at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the

Thanks for using Matrix!

[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.


Support
=======

@@ -115,12 +112,11 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.

An easy way to get started is to login or register via Riot at
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
(Leave the identity server as the default - see `Identity servers`_.)
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.

@@ -137,7 +133,7 @@ it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
user via a Matrix client.

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
@@ -183,30 +179,6 @@ versions of synapse.

.. _UPGRADE.rst: UPGRADE.rst


Using PostgreSQL
================

Synapse offers two database engines:
* `SQLite <https://sqlite.org/>`_
* `PostgreSQL <https://www.postgresql.org>`_

By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.

Almost all installations should opt to use PostgreSQL. Advantages include:

* significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
  pointing at the same DB master, as well as enabling DB replication in
  synapse itself.

For information on how to install and use PostgreSQL, please see
`docs/postgres.md <docs/postgres.md>`_.

.. _reverse-proxy:

Using a reverse proxy with Synapse
@@ -215,7 +187,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/proxy>`_ or
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
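As a minimal sketch (assuming Synapse listens on ``localhost:8008``; ``docs/reverse_proxy.md`` has fuller examples), an nginx server block might look like::

    server {
        listen 443 ssl;
        server_name matrix.example.com;

        # Forward all Matrix client and federation API traffic to Synapse.
        location /_matrix {
            proxy_pass http://localhost:8008;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }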

@@ -255,10 +227,9 @@ email address.
Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Riot.

A manual password reset can be done via direct database access as follows.
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.

First calculate the hash of the new password::

@@ -267,7 +238,7 @@ First calculate the hash of the new password::
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the `users` table in the database::
Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    WHERE name='@test:test.com';
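Using the admin API mentioned above instead of raw SQL (a sketch; the field names follow docs/admin_api/user_admin_api.rst, and the token must belong to a server admin)::

    curl -X POST --header "Authorization: Bearer <admin_access_token>" \
        -d '{"new_password": "<new_password>", "logout_devices": true}' \
        http://localhost:8008/_synapse/admin/v1/reset_password/@test:test.com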

@@ -335,6 +306,9 @@ Building internal API documentation::
Troubleshooting
===============

Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_

Running out of File Handles
---------------------------


18 UPGRADE.rst
@@ -75,6 +75,24 @@ for example:
    wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

Upgrading to v1.18.0
====================

Docker `-py3` suffix will be removed in future versions
-------------------------------------------------------

From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.

On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.

Scripts relying on the `-py3` suffix will need to be updated.
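For example, a ``docker-compose.yml`` pinned to a suffixed tag would change as follows (an illustrative edit)::

    # before
    image: matrixdotorg/synapse:v1.18.0-py3
    # after
    image: matrixdotorg/synapse:v1.18.0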

Redis replication is now recommended in lieu of TCP replication
---------------------------------------------------------------

When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.**
See `docs/workers.md <docs/workers.md>`_ for more details.
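On the Synapse side this amounts to a small ``homeserver.yaml`` addition (a sketch assuming a Redis server on localhost; docs/workers.md is authoritative)::

    redis:
      enabled: true
      host: localhost
      port: 6379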

Upgrading to v1.14.0
====================


1 changelog.d/7864.bugfix Normal file
@@ -0,0 +1 @@
Fix a memory leak by limiting the length of time that messages will be queued for a remote server that has been unreachable.
1 changelog.d/8013.feature Normal file
@@ -0,0 +1 @@
Iteratively encode JSON to avoid blocking the reactor.
1 changelog.d/8037.feature Normal file
@@ -0,0 +1 @@
Use the default template file when its equivalent is not found in a custom template directory.
1 changelog.d/8072.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8074.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8075.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8076.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8081.bugfix Normal file
@@ -0,0 +1 @@
Fix `Re-starting finished log context PUT-nnnn` warning when event persistence failed.
1 changelog.d/8085.misc Normal file
@@ -0,0 +1 @@
Remove some unused database functions.
1 changelog.d/8087.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8090.misc Normal file
@@ -0,0 +1 @@
Add type hints to `synapse.handlers.room`.
1 changelog.d/8092.feature Normal file
@@ -0,0 +1 @@
Add support for shadow-banning users (ignoring any message send requests).
1 changelog.d/8093.misc Normal file
@@ -0,0 +1 @@
Return the previous stream token if a non-member event is a duplicate.
1 changelog.d/8100.misc Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.
1 changelog.d/8101.bugfix Normal file
@@ -0,0 +1 @@
Synapse now correctly enforces the valid characters in the `client_secret` parameter used in various endpoints.
1 changelog.d/8107.feature Normal file
@@ -0,0 +1 @@
Use the default template file when its equivalent is not found in a custom template directory.
1 changelog.d/8111.doc Normal file
@@ -0,0 +1 @@
Link to matrix-synapse-rest-password-provider in the password provider documentation.
1 changelog.d/8112.misc Normal file
@@ -0,0 +1 @@
Return the previous stream token if a non-member event is a duplicate.
@@ -17,9 +17,6 @@
""" Starts a synapse client console. """
from __future__ import print_function

from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient

import argparse
import cmd
import getpass
@@ -28,12 +25,14 @@ import shlex
import sys
import time
import urllib
import urlparse
from http import TwistedHttpClient

import nacl.signing
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.sign import SignatureVerifyException, verify_signed_json

from signedjson.sign import verify_signed_json, SignatureVerifyException
from twisted.internet import defer, reactor, threads

CONFIG_JSON = "cmdclient_config.json"

@@ -493,7 +492,7 @@ class SynapseCmd(cmd.Cmd):
        "list messages <roomid> from=END&to=START&limit=3"
        """
        args = self._parse(line, ["type", "roomid", "qp"])
        if not "type" in args or not "roomid" in args:
        if "type" not in args or "roomid" not in args:
            print("Must specify type and room ID.")
            return
        if args["type"] not in ["members", "messages"]:
@@ -508,7 +507,7 @@ class SynapseCmd(cmd.Cmd):
            try:
                key_value = key_value_str.split("=")
                qp[key_value[0]] = key_value[1]
            except:
            except Exception:
                print("Bad query param: %s" % key_value)
                return

@@ -585,7 +584,7 @@ class SynapseCmd(cmd.Cmd):
            parsed_url = urlparse.urlparse(args["path"])
            qp.update(urlparse.parse_qs(parsed_url.query))
            args["path"] = parsed_url.path
        except:
        except Exception:
            pass

        reactor.callFromThread(
@@ -610,13 +609,15 @@ class SynapseCmd(cmd.Cmd):

    @defer.inlineCallbacks
    def _do_event_stream(self, timeout):
        res = yield self.http_client.get_json(
            self._url() + "/events",
            {
                "access_token": self._tok(),
                "timeout": str(timeout),
                "from": self.event_stream_token,
            },
        res = yield defer.ensureDeferred(
            self.http_client.get_json(
                self._url() + "/events",
                {
                    "access_token": self._tok(),
                    "timeout": str(timeout),
                    "from": self.event_stream_token,
                },
            )
        )
        print(json.dumps(res, indent=4))

@@ -772,10 +773,10 @@ def main(server_url, identity_server_url, username, token, config_path):
            syn_cmd.config = json.load(config)
            try:
                http_client.verbose = "on" == syn_cmd.config["verbose"]
            except:
            except Exception:
                pass
            print("Loaded config from %s" % config_path)
    except:
    except Exception:
        pass

    # Twisted-specific: Runs the command processor in Twisted's event loop
@@ -14,14 +14,14 @@
# limitations under the License.

from __future__ import print_function
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor

from pprint import pformat

import json
import urllib
from pprint import pformat

from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers


class HttpClient(object):

@@ -50,7 +50,7 @@ services:
      - traefik.http.routers.https-synapse.tls.certResolver=le-ssl

  db:
    image: docker.io/postgres:10-alpine
    image: docker.io/postgres:12-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
@@ -28,27 +28,24 @@ Currently assumes the local address is localhost:<port>
"""


from synapse.federation import ReplicationHandler

from synapse.federation.units import Pdu

from synapse.util import origin_from_ucid

from synapse.app.homeserver import SynapseHomeServer

# from synapse.logging.utils import log_function

from twisted.internet import reactor, defer
from twisted.python import log

import argparse
import curses.wrapper
import json
import logging
import os
import re

import cursesio
import curses.wrapper

from twisted.internet import defer, reactor
from twisted.python import log

from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid

# from synapse.logging.utils import log_function


logger = logging.getLogger("example")
@@ -75,7 +72,7 @@ class InputOutput(object):
        """

        try:
            m = re.match("^join (\S+)$", line)
            m = re.match(r"^join (\S+)$", line)
            if m:
                # The `sender` wants to join a room.
                (room_name,) = m.groups()
@@ -84,7 +81,7 @@ class InputOutput(object):
                # self.print_line("OK.")
                return

            m = re.match("^invite (\S+) (\S+)$", line)
            m = re.match(r"^invite (\S+) (\S+)$", line)
            if m:
                # `sender` wants to invite someone to a room
                room_name, invitee = m.groups()
@@ -93,7 +90,7 @@ class InputOutput(object):
                # self.print_line("OK.")
                return

            m = re.match("^send (\S+) (.*)$", line)
            m = re.match(r"^send (\S+) (.*)$", line)
            if m:
                # `sender` wants to message a room
                room_name, body = m.groups()
@@ -102,7 +99,7 @@ class InputOutput(object):
                # self.print_line("OK.")
                return

            m = re.match("^backfill (\S+)$", line)
            m = re.match(r"^backfill (\S+)$", line)
            if m:
                # we want to backfill a room
                (room_name,) = m.groups()
@@ -201,16 +198,6 @@ class HomeServer(ReplicationHandler):
            % (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
        )

    # def on_state_change(self, pdu):
    ##self.output.print_line("#%s (state) %s *** %s" %
    ##(pdu.context, pdu.state_key, pdu.pdu_type)
    ##)

    # if "joinee" in pdu.content:
    # self._on_join(pdu.context, pdu.content["joinee"])
    # elif "invitee" in pdu.content:
    # self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])

    def _on_message(self, pdu):
        """ We received a message
        """
@@ -314,7 +301,7 @@ class HomeServer(ReplicationHandler):
        return self.replication_layer.backfill(dest, room_name, limit)

    def _get_room_remote_servers(self, room_name):
        return [i for i in self.joined_rooms.setdefault(room_name).servers]
        return list(self.joined_rooms.setdefault(room_name).servers)

    def _get_or_create_room(self, room_name):
        return self.joined_rooms.setdefault(room_name, Room(room_name))
@@ -334,7 +321,7 @@ def main(stdscr):
    user = args.user
    server_name = origin_from_ucid(user)

    ## Set up logging ##
    # Set up logging

    root_logger = logging.getLogger()

@@ -354,7 +341,7 @@ def main(stdscr):
    observer = log.PythonLoggingObserver()
    observer.start()

    ## Set up synapse server
    # Set up synapse server

    curses_stdio = cursesio.CursesStdIO(stdscr)
    input_output = InputOutput(curses_stdio, user)
@@ -368,16 +355,16 @@ def main(stdscr):

    input_output.set_home_server(hs)

    ## Add input_output logger
    # Add input_output logger
    io_logger = IOLoggerHandler(input_output)
    io_logger.setFormatter(formatter)
    root_logger.addHandler(io_logger)

    ## Start! ##
    # Start!

    try:
        port = int(server_name.split(":")[1])
    except:
    except Exception:
        port = 12345

    app_hs.get_http_server().start_listening(port)

File diff suppressed because it is too large
@@ -1,5 +1,13 @@
from __future__ import print_function

import argparse
import cgi
import datetime
import json

import pydot
import urllib2

# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,15 +23,6 @@ from __future__ import print_function
# limitations under the License.


import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
import urllib2


def make_name(pdu_id, origin):
    return "%s@%s" % (pdu_id, origin)

@@ -33,7 +32,7 @@ def make_graph(pdus, room, filename_prefix):
    node_map = {}

    origins = set()
    colors = set(("red", "green", "blue", "yellow", "purple"))
    colors = {"red", "green", "blue", "yellow", "purple"}

    for pdu in pdus:
        origins.add(pdu.get("origin"))
@@ -49,7 +48,7 @@ def make_graph(pdus, room, filename_prefix):
        try:
            c = colors.pop()
            color_map[o] = c
        except:
        except Exception:
            print("Run out of colours!")
            color_map[o] = "black"


@@ -13,12 +13,13 @@
# limitations under the License.


import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
import cgi
import datetime
import json
import sqlite3

import pydot

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
@@ -98,7 +99,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
    for prev_id, _ in event.prev_events:
        try:
            end_node = node_map[prev_id]
        except:
        except Exception:
            end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))

            node_map[prev_id] = end_node

@@ -1,5 +1,15 @@
from __future__ import print_function

import argparse
import cgi
import datetime

import pydot
import simplejson as json

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze

# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,18 +25,6 @@ from __future__ import print_function
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze

from six import string_types


def make_graph(file_name, room_id, file_prefix, limit):
    print("Reading lines")
    with open(file_name) as f:
@@ -62,7 +60,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
    for key, value in unfreeze(event.get_dict()["content"]).items():
        if value is None:
            value = "<null>"
        elif isinstance(value, string_types):
        elif isinstance(value, str):
            pass
        else:
            value = json.dumps(value)
@@ -108,7 +106,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
    for prev_id, _ in event.prev_events:
        try:
            end_node = node_map[prev_id]
        except:
        except Exception:
            end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))

            node_map[prev_id] = end_node

@@ -12,15 +12,15 @@ npm install jquery jsdom
"""
from __future__ import print_function

import gevent
import grequests
from BeautifulSoup import BeautifulSoup
import json
import urllib
import subprocess
import time

# ACCESS_TOKEN="" #
import gevent
import grequests
from BeautifulSoup import BeautifulSoup

ACCESS_TOKEN = ""

MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"

@@ -1,10 +1,12 @@
#!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser

import json
import requests
import sys
import urllib
from argparse import ArgumentParser

import requests

try:
    raw_input

@@ -15,6 +15,9 @@

[Unit]
Description=Synapse Matrix homeserver
# If you are using postgresql to persist data, uncomment this line to make sure
# synapse starts after the postgresql service.
# After=postgresql.service

[Service]
Type=notify

58 debian/changelog vendored
@@ -1,3 +1,61 @@
matrix-synapse-py3 (1.19.0) stable; urgency=medium

  [ Synapse Packaging team ]
  * New synapse release 1.19.0.

  [ Aaron Raimist ]
  * Fix outdated documentation for SYNAPSE_CACHE_FACTOR

 -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Aug 2020 14:06:42 +0100

matrix-synapse-py3 (1.18.0) stable; urgency=medium

  * New synapse release 1.18.0.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 30 Jul 2020 10:55:53 +0100

matrix-synapse-py3 (1.17.0) stable; urgency=medium

  * New synapse release 1.17.0.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 13 Jul 2020 10:20:31 +0100

matrix-synapse-py3 (1.16.1) stable; urgency=medium

  * New synapse release 1.16.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 10 Jul 2020 12:09:24 +0100

matrix-synapse-py3 (1.17.0rc1) stable; urgency=medium

  * New synapse release 1.17.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 09 Jul 2020 16:53:12 +0100

matrix-synapse-py3 (1.16.0) stable; urgency=medium

  * New synapse release 1.16.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 08 Jul 2020 11:03:48 +0100

matrix-synapse-py3 (1.15.2) stable; urgency=medium

  * New synapse release 1.15.2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 02 Jul 2020 10:34:00 -0400

matrix-synapse-py3 (1.15.1) stable; urgency=medium

  * New synapse release 1.15.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Jun 2020 10:27:50 +0100

matrix-synapse-py3 (1.15.0) stable; urgency=medium

  * New synapse release 1.15.0.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 11 Jun 2020 13:27:06 +0100

matrix-synapse-py3 (1.14.0) stable; urgency=medium

  * New synapse release 1.14.0.

2 debian/matrix-synapse.default vendored
@@ -1,2 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=1 (default)
# SYNAPSE_CACHE_FACTOR=0.5 (default)

27 debian/synctl.ronn vendored
@@ -46,19 +46,20 @@ Configuration file may be generated as follows:
## ENVIRONMENT

  * `SYNAPSE_CACHE_FACTOR`:
    Synapse's architecture is quite RAM hungry currently - a lot of
    recent room data and metadata is deliberately cached in RAM in
    order to speed up common requests. This will be improved in
    future, but for now the easiest way to either reduce the RAM usage
    (at the risk of slowing things down) is to set the
    SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
    SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
    resident memory - this is what we currently run the matrix.org
    on. The default setting is currently 0.1, which is probably around
    a ~700MB footprint. You can dial it down further to 0.02 if
    desired, which targets roughly ~512MB. Conversely you can dial it
    up if you need performance for lots of users and have a box with a
    lot of RAM.
    Synapse's architecture is quite RAM hungry currently - we deliberately
    cache a lot of recent room data and metadata in RAM in order to speed up
    common requests. We'll improve this in the future, but for now the easiest
    way to either reduce the RAM usage (at the risk of slowing things down)
    is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
    variable. The default is 0.5, which can be decreased to reduce RAM usage
    in memory constrained environments, or increased if performance starts to
    degrade.

    However, degraded performance due to a low cache factor, common on
    machines with slow disks, often leads to explosions in memory use due to
    backlogged requests. In this case, reducing the cache factor will make
    things worse. Instead, try increasing it drastically. 2.0 is a good
    starting value.
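    For example, on a Debian package install the factor can be raised in
    `/etc/default/matrix-synapse` (an illustrative value; tune it to your
    hardware):

        # /etc/default/matrix-synapse
        SYNAPSE_CACHE_FACTOR=2.0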

## COPYRIGHT

@@ -16,34 +16,31 @@ ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder
FROM docker.io/python:${PYTHON_VERSION}-slim as builder

# install the OS build deps

RUN apk add \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        zlib-dev

# build things which have slow build steps, before we copy synapse, so that
# the layer can be cached.
#
# (we really just care about caching a wheel here, as the "pip install" below
# will install them again.)
RUN apt-get update && apt-get install -y \
        build-essential \
        libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Build dependencies that are not available as wheels, to speed up rebuilds
RUN pip install --prefix="/install" --no-warn-script-location \
        cryptography \
        msgpack-python \
        pillow \
        pynacl
        frozendict \
        jaeger-client \
        opentracing \
        prometheus-client \
        psycopg2 \
        pycparser \
        pyrsistent \
        pyyaml \
        simplejson \
        threadloop \
        thrift

# now install synapse and all of the python deps to /install.

COPY synapse /synapse/synapse/
COPY scripts /synapse/scripts/
COPY MANIFEST.in README.rst setup.py synctl /synapse/
@@ -55,19 +52,13 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###

FROM docker.io/python:${PYTHON_VERSION}-alpine3.11
FROM docker.io/python:${PYTHON_VERSION}-slim

# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \
        libffi \
        libjpeg-turbo \
        libressl \
        libxslt \
        libpq \
        zlib \
        su-exec \
        tzdata \
        xmlsec
RUN apt-get update && apt-get install -y \
        libpq5 \
        xmlsec1 \
        gosu \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -28,7 +28,7 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \

# fetch and unpack the package
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/matrix-org/dh-virtualenv/archive/matrixorg-20200519.tar.gz
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz

# install its build deps. We do another apt-cache-update here, because we might

@@ -94,6 +94,21 @@ The following environment variables are supported in run mode:
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.

## Generating an (admin) user

After synapse is running, you may wish to create a user via `register_new_matrix_user`.

This requires a `registration_shared_secret` to be set in your config file. Synapse
must be restarted to pick up this change.

You can then call the script:

```
docker exec -it synapse register_new_matrix_user http://localhost:8008 -c /data/homeserver.yaml --help
```

Remember to remove the `registration_shared_secret` and restart if you no longer need it.

## TLS support

The default configuration exposes a single HTTP port: http://localhost:8008. It
@@ -4,16 +4,10 @@ formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
    synapse.storage.SQL:

@@ -120,7 +120,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):

    if ownership is not None:
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args = ["su-exec", ownership] + args
        args = ["gosu", ownership] + args

        subprocess.check_output(args)

@@ -172,8 +172,8 @@ def run_generate_config(environ, ownership):
        # make sure that synapse has perms to write to the data dir.
        subprocess.check_output(["chown", ownership, data_dir])

        args = ["su-exec", ownership] + args
        os.execv("/sbin/su-exec", args)
        args = ["gosu", ownership] + args
        os.execv("/usr/sbin/gosu", args)
    else:
        os.execv("/usr/local/bin/python", args)

@@ -189,7 +189,7 @@ def main(args, environ):
        ownership = "{}:{}".format(desired_uid, desired_gid)

    if ownership is None:
        log("Will not perform chmod/su-exec as UserID already matches request")
        log("Will not perform chmod/gosu as UserID already matches request")

    # In generate mode, generate a configuration and missing keys, then exit
    if mode == "generate":
@@ -236,8 +236,8 @@ running with 'migrate_config'. See the README for more details.

    args = ["python", "-m", synapse_worker, "--config-path", config_path]
    if ownership is not None:
        args = ["su-exec", ownership] + args
        os.execv("/sbin/su-exec", args)
        args = ["gosu", ownership] + args
        os.execv("/usr/sbin/gosu", args)
    else:
        os.execv("/usr/local/bin/python", args)
@@ -10,5 +10,16 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.

# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.

################################################################################


@@ -12,13 +12,14 @@ introduced support for automatically provisioning certificates through
In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
Let's Encrypt announced that they were deprecating version 1 of the ACME
protocol, with the plan to disable the use of it for new accounts in
November 2019, and for existing accounts in June 2020.
November 2019, for new domains in June 2020, and for existing accounts and
domains in June 2021.

Synapse doesn't currently support version 2 of the ACME protocol, which
means that:

* for existing installs, Synapse's built-in ACME support will continue
  to work until June 2020.
  to work until June 2021.
* for new installs, this feature will not work at all.

Either way, it is recommended to move from Synapse's ACME support
@@ -4,17 +4,21 @@ Admin APIs
This directory includes documentation for the various synapse specific admin
APIs available.

Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:
Authenticating as a server admin
--------------------------------

``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)

Restarting may be required for the changes to register.
A user can be marked as a server admin by updating the database directly, e.g.:

Using an admin access_token
###########################
.. code-block:: sql

    UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';

A new server admin user can also be created using the
``register_new_matrix_user`` script.
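For instance (an illustrative invocation; ``register_new_matrix_user --help`` lists the authoritative flags)::

    register_new_matrix_user -u <admin_user> -p <password> --admin \
        -c homeserver.yaml http://localhost:8008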
|
||||
|
||||
Many of the API calls listed in the documentation here will require to include an admin `access_token`.
|
||||
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
|
||||
|
||||
Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
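For example (a sketch; the token, host, and endpoint are placeholders):

```
curl --header "Authorization: Bearer <access_token>" \
    http://localhost:8008/_synapse/admin/v1/server_version
```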
@@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.


The API is:

```
POST /_synapse/admin/v1/delete_group/<group_id>
```

including an `access_token` of a server admin.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).

@@ -6,9 +6,10 @@ The API is:
```
GET /_synapse/admin/v1/room/<room_id>/media
```
including an `access_token` of a server admin.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).

It returns a JSON body like the following:
The API returns a JSON body like the following:
```
{
  "local": [
@@ -99,4 +100,3 @@ Response:
  "num_quarantined": 10  # The number of media items successfully quarantined
}
```

@@ -15,7 +15,8 @@ The API is:

``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
@@ -54,8 +55,10 @@ It is possible to poll for updates on recent purges with a second API;

``GET /_synapse/admin/v1/purge_history_status/<purge_id>``

(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
Again, you will need to authenticate by providing an ``access_token`` for a
server admin.

This API returns a JSON body like the following:

.. code:: json

@@ -6,12 +6,15 @@ media.

The API is::

    POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
    POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>

    {}

Which will remove all cached media that was last accessed before
\... which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

If the user re-requests purged remote media, synapse will re-request the media
from the originating server.

@@ -5,6 +5,8 @@ This API will remove all trace of a room from your database.

All local users must have left the room before it can be removed.

See also: [Delete Room API](rooms.md#delete-room-api)

The API is:

```

@@ -23,7 +23,8 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
}
```

Including an `access_token` of a server admin.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).

Response:

@@ -318,3 +318,134 @@ Response:
  "state_events": 93534
}
```

# Room Members API

The Room Members admin API allows server admins to get a list of all members of a room.

The response includes the following fields:

* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.

## Usage

A standard request:

```
GET /_synapse/admin/v1/rooms/<room_id>/members

{}
```

Response:

```
{
  "members": [
    "@foo:matrix.org",
    "@bar:matrix.org",
    "@foobar:matrix.org"
  ],
  "total": 3
}
```

# Delete Room API

The Delete Room admin API allows server admins to remove rooms from the server
and block these rooms.
It is a combination and improvement of the "[Shutdown room](shutdown_room.md)"
and "[Purge room](purge_room.md)" APIs.

Shuts down a room. Moves all local users and room aliases automatically to a
new room if `new_room_user_id` is set. Otherwise local users only
leave the room without any information.

The new room will be created with the user specified by the `new_room_user_id` parameter
as room administrator and will contain a message explaining what happened. Users invited
to the new room will have power level `-10` by default, and thus be unable to speak.

If `block` is `True` it prevents new joins to the old room.

This API will remove all trace of the old room from your database after removing
all local users. If `purge` is `true` (the default), all traces of the old room will
be removed from your database after removing all local users. If you do not want
this to happen, set `purge` to `false`.
Depending on the amount of history being purged a call to the API may take
several minutes or longer.

The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.

The API is:

```json
POST /_synapse/admin/v1/rooms/<room_id>/delete
```

with a body of:
```json
{
    "new_room_user_id": "@someuser:example.com",
    "room_name": "Content Violation Notification",
    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
    "block": true,
    "purge": true
}
```

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [README.rst](README.rst).

A response body like the following is returned:

```json
{
    "kicked_users": [
        "@foobar:example.com"
    ],
    "failed_to_kick_users": [],
    "local_aliases": [
        "#badroom:example.com",
        "#evilsaloon:example.com"
    ],
    "new_room_id": "!newroomid:example.com"
}
```

## Parameters

The following parameters should be set in the URL:

* `room_id` - The ID of the room.

The following JSON body parameters are available:

* `new_room_user_id` - Optional. If set, a new room will be created with this user ID
  as the creator and admin, and all users in the old room will be moved into that
  room. If not set, no new room will be created and the users will just be removed
  from the old room. The user ID must be on the local server, but does not necessarily
  have to belong to a registered user.
* `room_name` - Optional. A string representing the name of the room that new users will be
  invited to. Defaults to `Content Violation Notification`
* `message` - Optional. A string containing the first message that will be sent as
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down. Defaults to `Sharing illegal content on this server
  is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
  future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
  Defaults to `true`.

The JSON body must not be empty. The body must be at least `{}`.

## Response

The following fields are returned in the JSON response body:

* `kicked_users` - An array of users (`user_id`) that were kicked.
* `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
  the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.

@@ -10,6 +10,8 @@ disallow any further invites or joins.
The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.

See also: [Delete Room API](rooms.md#delete-room-api)

## API

You will need to authenticate with an access token for an admin user.
@@ -31,7 +33,7 @@ You will need to authenticate with an access token for an admin user.
* `message` - Optional. A string containing the first message that will be sent as
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down.


If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."
@@ -70,3 +72,30 @@ Response:
  "new_room_id": "!newroomid:example.com",
},
```

## Undoing room shutdowns

*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.

First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:

* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.

With all that being said, if you still want to try and recover the room:

1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * For caution: it's recommended to run this in a transaction (a sketch follows this list): `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
   * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
|
||||
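If you prefer to script the unblocking step, the following is a minimal sketch under the assumption of a PostgreSQL database accessed with `psycopg2` (adapt for SQLite); it wraps the `DELETE` in a transaction and only commits when exactly one row was removed.

```python
# Minimal sketch: unblock a previously shut-down room inside a transaction.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")  # placeholder DSN
try:
    with conn.cursor() as cur:
        cur.execute(
            "DELETE FROM blocked_rooms WHERE room_id = %s",
            ("!example:example.org",),  # the room ID supplied to the shutdown API
        )
        if cur.rowcount != 1:
            raise RuntimeError(f"expected 1 row, got {cur.rowcount}")
    conn.commit()  # COMMIT only after verifying exactly one row was deleted
except Exception:
    conn.rollback()
    raise
finally:
    conn.close()
```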

You will have to manually handle the following, if you so choose:

* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.
@@ -1,9 +1,47 @@
.. contents::

Query User Account
==================

This API returns information about a specific user account.

The api is::

    GET /_synapse/admin/v2/users/<user_id>

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

It returns a JSON body like the following:

.. code:: json

    {
        "displayname": "User",
        "threepids": [
            {
                "medium": "email",
                "address": "<user_mail_1>"
            },
            {
                "medium": "email",
                "address": "<user_mail_2>"
            }
        ],
        "avatar_url": "<avatar_url>",
        "admin": false,
        "deactivated": false
    }

URL parameters:

- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.

Create or modify Account
========================

This API allows an administrator to create or modify a user account with a
specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
``@user:server.com``.
specific ``user_id``.

This api is::

@@ -31,27 +69,36 @@ with a body of:

        "deactivated": false
    }

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

Parameters:
URL parameters:

- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.

Body parameters:

- ``password``, optional. If provided, the user's password is updated and all
  devices are logged out.

- ``displayname``, optional, defaults to the value of ``user_id``.

- ``threepids``, optional, allows setting the third-party IDs (email, msisdn)
  belonging to a user.

- ``avatar_url``, optional, must be a
- ``avatar_url``, optional, must be a
  `MXC URI <https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris>`_.

- ``admin``, optional, defaults to ``false``.

- ``deactivated``, optional, defaults to ``false``.
- ``deactivated``, optional. If unspecified, deactivation state will be left
  unchanged on existing accounts and set to ``false`` for new accounts.

If the user already exists then optional parameters default to the current value.

In order to re-activate an account ``deactivated`` must be set to ``false``. If
users do not log in via single-sign-on, a new ``password`` must be provided.
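As an illustration, here is a minimal sketch of creating or modifying an account from Python with the ``requests`` library. It assumes the API above uses ``PUT`` on the same ``/_synapse/admin/v2/users/<user_id>`` path as the query endpoint; the base URL and token are placeholders.

.. code:: python

    # Minimal sketch (assumptions noted above): create or modify an account.
    import requests

    resp = requests.put(
        "https://synapse.example.com/_synapse/admin/v2/users/@user:server.com",
        headers={"Authorization": "Bearer <admin access token>"},
        json={
            "password": "user_password",
            "displayname": "User",
            "admin": False,
            "deactivated": False,
        },
    )
    resp.raise_for_status()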
List Accounts
=============

@@ -61,7 +108,8 @@ The api is::

    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The parameter ``from`` is optional but used for pagination, denoting the
offset in the returned results. This should be treated as an opaque value and

@@ -116,17 +164,17 @@ with ``from`` set to the value of ``next_token``. This will return a new page.

If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.
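The pagination contract above translates directly into a fetch loop. Here is a minimal sketch; the base URL and token are placeholders, and the name of the ``users`` field in the response is assumed (it is not shown in this excerpt).

.. code:: python

    # Minimal sketch: page through all accounts using from/next_token.
    import requests

    headers = {"Authorization": "Bearer <admin access token>"}
    users, from_token = [], "0"
    while True:
        resp = requests.get(
            "https://synapse.example.com/_synapse/admin/v2/users",
            headers=headers,
            params={"from": from_token, "limit": 10, "guests": "false"},
        )
        body = resp.json()
        users.extend(body["users"])       # "users" key assumed from the schema
        if "next_token" not in body:      # no next_token: nothing left to fetch
            break
        from_token = body["next_token"]   # opaque value, passed back verbatim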
Query Account
=============
Query current sessions for a user
=================================

This API returns information about a specific user account.
This API returns information about the active sessions for a specific user.

The api is::

    GET /_synapse/admin/v1/whois/<user_id> (deprecated)
    GET /_synapse/admin/v2/users/<user_id>
    GET /_synapse/admin/v1/whois/<user_id>

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

It returns a JSON body like the following:

@@ -179,9 +227,10 @@ with a body of:

        "erase": true
    }

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The erase parameter is optional and defaults to 'false'.
The erase parameter is optional and defaults to ``false``.
An empty body may be passed for backwards compatibility.
@@ -203,7 +252,8 @@ with a body of:

        "logout_devices": true
    }

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The parameter ``new_password`` is required.
The parameter ``logout_devices`` is optional and defaults to ``true``.
@@ -216,7 +266,8 @@ The api is::

    GET /_synapse/admin/v1/users/<user_id>/admin

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

@@ -244,4 +295,191 @@ with a body of:

        "admin": true
    }

including an ``access_token`` of a server admin.
To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
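For illustration, a minimal sketch of reading a user's admin bit through the ``GET`` endpoint above; the base URL and token are placeholders, and the shape of the response (``{"admin": true}``) is assumed to mirror the request body shown here.

.. code:: python

    # Minimal sketch: query whether a user is a server admin.
    import requests

    resp = requests.get(
        "https://synapse.example.com/_synapse/admin/v1/users/@user:server.com/admin",
        headers={"Authorization": "Bearer <admin access token>"},
    )
    print(resp.json())  # e.g. {"admin": true} -- shape assumed, see above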
User devices
============

List all devices
----------------
Gets information about all devices for a specific ``user_id``.

The API is::

    GET /_synapse/admin/v2/users/<user_id>/devices

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "devices": [
            {
                "device_id": "QBUAZIFURK",
                "display_name": "android",
                "last_seen_ip": "1.2.3.4",
                "last_seen_ts": 1474491775024,
                "user_id": "<user_id>"
            },
            {
                "device_id": "AUIECTSRND",
                "display_name": "ios",
                "last_seen_ip": "1.2.3.5",
                "last_seen_ts": 1474491775025,
                "user_id": "<user_id>"
            }
        ]
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

**Response**

The following fields are returned in the JSON response body:

- ``devices`` - An array of objects, each containing information about a device.
  Device objects contain the following fields:

  - ``device_id`` - Identifier of device.
  - ``display_name`` - Display name set by the user for this device.
    Absent if no name has been set.
  - ``last_seen_ip`` - The IP address where this device was last seen.
    (May be a few minutes out of date, for efficiency reasons).
  - ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
    device was last seen. (May be a few minutes out of date, for efficiency reasons).
  - ``user_id`` - Owner of device.
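As a quick illustration, a minimal sketch of listing a user's devices via the endpoint above and printing one summary line per device (base URL and token are placeholders):

.. code:: python

    # Minimal sketch: list all devices for a user.
    import requests

    resp = requests.get(
        "https://synapse.example.com/_synapse/admin/v2/users/@user:server.com/devices",
        headers={"Authorization": "Bearer <admin access token>"},
    )
    for device in resp.json()["devices"]:
        # display_name may be absent if the user never named the device
        print(device["device_id"], device.get("display_name", "<unnamed>"))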
Delete multiple devices
-----------------------
Deletes the given devices for a specific ``user_id``, and invalidates
any access token associated with them.

The API is::

    POST /_synapse/admin/v2/users/<user_id>/delete_devices

    {
        "devices": [
            "QBUAZIFURK",
            "AUIECTSRND"
        ]
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

The following fields are required in the JSON request body:

- ``devices`` - The list of device IDs to delete.
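For completeness, a minimal sketch of calling this endpoint from Python; the base URL and token are placeholders, and the device IDs are the ones from the example request above.

.. code:: python

    # Minimal sketch: delete two devices and invalidate their access tokens.
    import requests

    resp = requests.post(
        "https://synapse.example.com/_synapse/admin/v2/users/@user:server.com/delete_devices",
        headers={"Authorization": "Bearer <admin access token>"},
        json={"devices": ["QBUAZIFURK", "AUIECTSRND"]},
    )
    resp.raise_for_status()
    assert resp.json() == {}  # an empty JSON dict is returned on success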
Show a device
-------------
Gets information on a single device, by ``device_id`` for a specific ``user_id``.

The API is::

    GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "device_id": "<device_id>",
        "display_name": "android",
        "last_seen_ip": "1.2.3.4",
        "last_seen_ts": 1474491775024,
        "user_id": "<user_id>"
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to retrieve.

**Response**

The following fields are returned in the JSON response body:

- ``device_id`` - Identifier of device.
- ``display_name`` - Display name set by the user for this device.
  Absent if no name has been set.
- ``last_seen_ip`` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- ``user_id`` - Owner of device.

Update a device
---------------
Updates the metadata on the given ``device_id`` for a specific ``user_id``.

The API is::

    PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>

    {
        "display_name": "My other phone"
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to update.

The following fields can be set in the JSON request body:

- ``display_name`` - The new display name for this device. If not given,
  the display name is unchanged.

Delete a device
---------------
Deletes the given ``device_id`` for a specific ``user_id``,
and invalidates any access token associated with it.

The API is::

    DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>

    {}

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to delete.
175 docs/dev/oidc.md

@@ -1,175 +0,0 @@
# How to test OpenID Connect

Any OpenID Connect Provider (OP) should work with Synapse, as long as it supports the authorization code flow.
There are a few options for that:

- start a local OP. Synapse has been tested with [Hydra][hydra] and [Dex][dex-idp].
  Note that for an OP to work, it should be served under a secure (HTTPS) origin.
  A certificate signed with a self-signed, locally trusted CA should work. In that case, start Synapse with an `SSL_CERT_FILE` environment variable set to the path of the CA.
- use a publicly available OP. Synapse has been tested with [Google][google-idp].
- set up a SaaS OP, like [Auth0][auth0] and [Okta][okta]. Auth0 has a free tier which has been tested with Synapse.

[google-idp]: https://developers.google.com/identity/protocols/OpenIDConnect#authenticatingtheuser
[auth0]: https://auth0.com/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[hydra]: https://www.ory.sh/docs/hydra/


## Sample configs

Here are a few configs for providers that should work with Synapse.

### [Dex][dex-idp]

[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
Although it is designed to help build a full-blown provider, with some external database, it can be configured with static passwords in a config file.

Follow the [Getting Started guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md) to install Dex.

Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:

```yaml
staticClients:
- id: synapse
  secret: secret
  redirectURIs:
  - '[synapse base url]/_synapse/oidc/callback'
  name: 'Synapse'
```

Run with `dex serve examples/config-dev.yaml`.

Synapse config:

```yaml
oidc_config:
   enabled: true
   skip_verification: true # This is needed as Dex is served on an insecure endpoint
   issuer: "http://127.0.0.1:5556/dex"
   discover: true
   client_id: "synapse"
   client_secret: "secret"
   scopes:
     - openid
     - profile
   user_mapping_provider:
     config:
       localpart_template: '{{ user.name }}'
       display_name_template: '{{ user.name|capitalize }}'
```

### [Auth0][auth0]

1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse base url]/_synapse/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
   <details>
    <summary>Code sample</summary>

    ```js
    function addPersistenceAttribute(user, context, callback) {
      user.user_metadata = user.user_metadata || {};
      user.user_metadata.preferred_username = user.user_metadata.preferred_username || user.user_id;
      context.idToken.preferred_username = user.user_metadata.preferred_username;

      auth0.users.updateUserMetadata(user.user_id, user.user_metadata)
        .then(function(){
            callback(null, user, context);
        })
        .catch(function(err){
            callback(err);
        });
    }
    ```

   </details>

```yaml
oidc_config:
   enabled: true
   issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
   discover: true
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   scopes:
     - openid
     - profile
   user_mapping_provider:
     config:
       localpart_template: '{{ user.preferred_username }}'
       display_name_template: '{{ user.name }}'
```

### GitHub

GitHub is a bit special as it is not an OpenID Connect compliant provider, but just a regular OAuth2 provider.
The `/user` API endpoint can be used to retrieve information about the user.
As the OIDC login mechanism needs an attribute to uniquely identify users and that endpoint does not return a `sub` property, an alternative `subject_claim` has to be set.

1. Create a new OAuth application: https://github.com/settings/applications/new
2. Set the callback URL to `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
   enabled: true
   issuer: "https://github.com/"
   discover: false
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   authorization_endpoint: "https://github.com/login/oauth/authorize"
   token_endpoint: "https://github.com/login/oauth/access_token"
   userinfo_endpoint: "https://api.github.com/user"
   scopes:
     - read:user
   user_mapping_provider:
     config:
       subject_claim: 'id'
       localpart_template: '{{ user.login }}'
       display_name_template: '{{ user.name }}'
```

### Google

1. Set up a project in the Google API Console
2. Obtain the OAuth 2.0 credentials (see <https://developers.google.com/identity/protocols/oauth2/openid-connect>)
3. Add this Authorized redirect URI: `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
   enabled: true
   issuer: "https://accounts.google.com/"
   discover: true
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   scopes:
     - openid
     - profile
   user_mapping_provider:
     config:
       localpart_template: '{{ user.given_name|lower }}'
       display_name_template: '{{ user.name }}'
```

### Twitch

1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse base url]/_synapse/oidc/callback`

```yaml
oidc_config:
   enabled: true
   issuer: "https://id.twitch.tv/oauth2/"
   discover: true
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   client_auth_method: "client_secret_post"
   scopes:
     - openid
   user_mapping_provider:
     config:
       localpart_template: '{{ user.preferred_username }}'
       display_name_template: '{{ user.name }}'
```
97 docs/jwt.md Normal file

@@ -0,0 +1,97 @@
# JWT Login Type

Synapse comes with a non-standard login type to support
[JSON Web Tokens](https://en.wikipedia.org/wiki/JSON_Web_Token). In general the
documentation for
[the login endpoint](https://matrix.org/docs/spec/client_server/r0.6.1#login)
is still valid (and the mechanism works similarly to the
[token based login](https://matrix.org/docs/spec/client_server/r0.6.1#token-based)).

To log in using a JSON Web Token, clients should submit a `/login` request as
follows:

```json
{
  "type": "org.matrix.login.jwt",
  "token": "<jwt>"
}
```

Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.

The `token` field should include the JSON web token with the following claims:

* The `sub` (subject) claim is required and should encode the local part of the
  user ID.
* The expiration time (`exp`), not before time (`nbf`), and issued at (`iat`)
  claims are optional, but validated if present.
* The issuer (`iss`) claim is optional, but required and validated if configured.
* The audience (`aud`) claim is optional, but required and validated if configured.
  Providing the audience claim when not configured will cause validation to fail.

In the case that the token is not valid, the homeserver must respond with
`403 Forbidden` and an error code of `M_FORBIDDEN`.

As with other login types, there are additional fields (e.g. `device_id` and
`initial_device_display_name`) which can be included in the above request.

## Preparing Synapse

The JSON Web Token integration in Synapse uses the
[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed
as follows:

* The relevant libraries are included in the Docker images and Debian packages
  provided by `matrix.org` so no further action is needed.

* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
  install synapse[pyjwt]` to install the necessary dependencies.

* For other installation mechanisms, see the documentation provided by the
  maintainer.

To enable the JSON web token integration, you should then add a `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings.

## How to test JWT as a developer

Although JSON Web Tokens are typically generated from an external server, the
examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly.

1. Configure Synapse with JWT logins, note that this example uses a pre-shared
   secret and an algorithm of HS256:

   ```yaml
   jwt_config:
       enabled: true
       secret: "my-secret-token"
       algorithm: "HS256"
   ```
2. Generate a JSON web token:

   ```bash
   $ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user
   eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc
   ```
3. Query for the login types and ensure `org.matrix.login.jwt` is there:

   ```bash
   curl http://localhost:8080/_matrix/client/r0/login
   ```
4. Log in using the generated JSON web token from above:

   ```bash
   $ curl http://localhost:8080/_matrix/client/r0/login -X POST \
       --data '{"type":"org.matrix.login.jwt","token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc"}'
   {
       "access_token": "<access token>",
       "device_id": "ACBDEFGHI",
       "home_server": "localhost:8080",
       "user_id": "@test-user:localhost:8080"
   }
   ```

You should now be able to use the returned access token to query the client API.
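If you would rather generate the token programmatically than via the `pyjwt` command-line tool, a minimal sketch with the PyJWT library (matching the pre-shared secret and HS256 algorithm configured in step 1) looks like this:

```python
# Minimal sketch: generate the login token with PyJWT instead of the CLI.
import jwt  # provided by the PyJWT package

token = jwt.encode({"sub": "test-user"}, "my-secret-token", algorithm="HS256")
# Note: on PyJWT 1.x jwt.encode returns bytes; call token.decode() if needed.
print(token)  # paste this into the "token" field of the /login request
```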
@@ -27,7 +27,7 @@
different thread to Synapse. This can make it more resilient to
heavy load (which could otherwise mean metrics cannot be retrieved), and
makes it easier to expose the endpoint to just internal networks. The served metrics are available
over HTTP only, and will be available at `/`.
over HTTP only, and will be available at `/_synapse/metrics`.

Add a new listener to homeserver.yaml:
250 docs/openid.md Normal file

@@ -0,0 +1,250 @@
# Configuring Synapse to authenticate against an OpenID Connect provider

Synapse can be configured to use an OpenID Connect Provider (OP) for
authentication, instead of its own local password database.

Any OP should work with Synapse, as long as it supports the authorization code
flow. There are a few options for that:

- start a local OP. Synapse has been tested with [Hydra][hydra] and
  [Dex][dex-idp]. Note that for an OP to work, it should be served under a
  secure (HTTPS) origin. A certificate signed with a self-signed, locally
  trusted CA should work. In that case, start Synapse with an `SSL_CERT_FILE`
  environment variable set to the path of the CA.

- set up a SaaS OP, like [Google][google-idp], [Auth0][auth0] or
  [Okta][okta]. Synapse has been tested with Auth0 and Google.

It may also be possible to use other OAuth2 providers which provide the
[authorization code grant type](https://tools.ietf.org/html/rfc6749#section-4.1),
such as [Github][github-idp].

[google-idp]: https://developers.google.com/identity/protocols/oauth2/openid-connect
[auth0]: https://auth0.com/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
[hydra]: https://www.ory.sh/docs/hydra/
[github-idp]: https://developer.github.com/apps/building-oauth-apps/authorizing-oauth-apps

## Preparing Synapse

The OpenID integration in Synapse uses the
[`authlib`](https://pypi.org/project/Authlib/) library, which must be installed
as follows:

* The relevant libraries are included in the Docker images and Debian packages
  provided by `matrix.org` so no further action is needed.

* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
  install synapse[oidc]` to install the necessary dependencies.

* For other installation mechanisms, see the documentation provided by the
  maintainer.

To enable the OpenID integration, you should then add an `oidc_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings, as well as the text below for example configurations for
specific providers.

## Sample configs

Here are a few configs for providers that should work with Synapse.

### [Dex][dex-idp]

[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
Although it is designed to help build a full-blown provider with an
external database, it can be configured with static passwords in a config file.

Follow the [Getting Started
guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md)
to install Dex.

Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:

```yaml
staticClients:
- id: synapse
  secret: secret
  redirectURIs:
  - '[synapse public baseurl]/_synapse/oidc/callback'
  name: 'Synapse'
```

Run with `dex serve examples/config-dev.yaml`.

Synapse config:

```yaml
oidc_config:
   enabled: true
   skip_verification: true # This is needed as Dex is served on an insecure endpoint
   issuer: "http://127.0.0.1:5556/dex"
   client_id: "synapse"
   client_secret: "secret"
   scopes: ["openid", "profile"]
   user_mapping_provider:
     config:
       localpart_template: "{{ user.name }}"
       display_name_template: "{{ user.name|capitalize }}"
```
### [Keycloak][keycloak-idp]

[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.

Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.

1. Click `Clients` in the sidebar and click `Create`

2. Fill in the fields as below:

   | Field | Value |
   |-----------|-----------|
   | Client ID | `synapse` |
   | Client Protocol | `openid-connect` |

3. Click `Save`
4. Fill in the fields as below:

   | Field | Value |
   |-----------|-----------|
   | Client ID | `synapse` |
   | Enabled | `On` |
   | Client Protocol | `openid-connect` |
   | Access Type | `confidential` |
   | Valid Redirect URIs | `[synapse public baseurl]/_synapse/oidc/callback` |

5. Click `Save`
6. On the Credentials tab, update the fields:

   | Field | Value |
   |-------|-------|
   | Client Authenticator | `Client ID and Secret` |

7. Click `Regenerate Secret`
8. Copy Secret

```yaml
oidc_config:
   enabled: true
   issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
   client_id: "synapse"
   client_secret: "copy secret generated from above"
   scopes: ["openid", "profile"]
```
### [Auth0][auth0]

1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
   <details>
    <summary>Code sample</summary>

    ```js
    function addPersistenceAttribute(user, context, callback) {
      user.user_metadata = user.user_metadata || {};
      user.user_metadata.preferred_username = user.user_metadata.preferred_username || user.user_id;
      context.idToken.preferred_username = user.user_metadata.preferred_username;

      auth0.users.updateUserMetadata(user.user_id, user.user_metadata)
        .then(function(){
            callback(null, user, context);
        })
        .catch(function(err){
            callback(err);
        });
    }
    ```
   </details>

Synapse config:

```yaml
oidc_config:
   enabled: true
   issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   scopes: ["openid", "profile"]
   user_mapping_provider:
     config:
       localpart_template: "{{ user.preferred_username }}"
       display_name_template: "{{ user.name }}"
```

### GitHub

GitHub is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.

The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.

1. Create a new OAuth application: https://github.com/settings/applications/new.
2. Set the callback URL to `[synapse public baseurl]/_synapse/oidc/callback`.

Synapse config:

```yaml
oidc_config:
   enabled: true
   discover: false
   issuer: "https://github.com/"
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   authorization_endpoint: "https://github.com/login/oauth/authorize"
   token_endpoint: "https://github.com/login/oauth/access_token"
   userinfo_endpoint: "https://api.github.com/user"
   scopes: ["read:user"]
   user_mapping_provider:
     config:
       subject_claim: "id"
       localpart_template: "{{ user.login }}"
       display_name_template: "{{ user.name }}"
```

### [Google][google-idp]

1. Set up a project in the Google API Console (see
   https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup).
2. Add an "OAuth Client ID" for a Web Application under "Credentials".
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
   ```yaml
   oidc_config:
      enabled: true
      issuer: "https://accounts.google.com/"
      client_id: "your-client-id" # TO BE FILLED
      client_secret: "your-client-secret" # TO BE FILLED
      scopes: ["openid", "profile"]
      user_mapping_provider:
        config:
          localpart_template: "{{ user.given_name|lower }}"
          display_name_template: "{{ user.name }}"
   ```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
   public baseurl]/_synapse/oidc/callback`.

### Twitch

1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/oidc/callback`

Synapse config:

```yaml
oidc_config:
   enabled: true
   issuer: "https://id.twitch.tv/oauth2/"
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   client_auth_method: "client_secret_post"
   user_mapping_provider:
     config:
       localpart_template: '{{ user.preferred_username }}'
       display_name_template: '{{ user.name }}'
```
@@ -14,107 +14,109 @@ password auth provider module implementations:

* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/)
* [matrix-synapse-shared-secret-auth](https://github.com/devture/matrix-synapse-shared-secret-auth)
* [matrix-synapse-rest-password-provider](https://github.com/ma1uta/matrix-synapse-rest-password-provider)

## Required methods

Password auth provider classes must provide the following methods:

*class* `SomeProvider.parse_config`(*config*)
* `parse_config(config)`

  This method is passed the `config` object for this module from the
  homeserver configuration file.

> This method is passed the `config` object for this module from the
> homeserver configuration file.
>
> It should perform any appropriate sanity checks on the provided
> configuration, and return an object which is then passed into
> `__init__`.

  It should perform any appropriate sanity checks on the provided
  configuration, and return an object which is then passed into
  `__init__`.

*class* `SomeProvider`(*config*, *account_handler*)

  This method should have the `@staticmethod` decoration.

> The constructor is passed the config object returned by
> `parse_config`, and a `synapse.module_api.ModuleApi` object which
> allows the password provider to check if accounts exist and/or create
> new ones.

* `__init__(self, config, account_handler)`

  The constructor is passed the config object returned by
  `parse_config`, and a `synapse.module_api.ModuleApi` object which
  allows the password provider to check if accounts exist and/or create
  new ones.

## Optional methods

Password auth provider classes may optionally provide the following
methods.
Password auth provider classes may optionally provide the following methods:

*class* `SomeProvider.get_db_schema_files`()
* `get_db_schema_files(self)`

> This method, if implemented, should return an Iterable of
> `(name, stream)` pairs of database schema files. Each file is applied
> in turn at initialisation, and a record is then made in the database
> so that it is not re-applied on the next start.

  This method, if implemented, should return an Iterable of
  `(name, stream)` pairs of database schema files. Each file is applied
  in turn at initialisation, and a record is then made in the database
  so that it is not re-applied on the next start.

`someprovider.get_supported_login_types`()
* `get_supported_login_types(self)`

> This method, if implemented, should return a `dict` mapping from a
> login type identifier (such as `m.login.password`) to an iterable
> giving the fields which must be provided by the user in the submission
> to the `/login` api. These fields are passed in the `login_dict`
> dictionary to `check_auth`.
>
> For example, if a password auth provider wants to implement a custom
> login type of `com.example.custom_login`, where the client is expected
> to pass the fields `secret1` and `secret2`, the provider should
> implement this method and return the following dict:
>
>     {"com.example.custom_login": ("secret1", "secret2")}

  This method, if implemented, should return a `dict` mapping from a
  login type identifier (such as `m.login.password`) to an iterable
  giving the fields which must be provided by the user in the submission
  to [the `/login` API](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
  These fields are passed in the `login_dict` dictionary to `check_auth`.

`someprovider.check_auth`(*username*, *login_type*, *login_dict*)

  For example, if a password auth provider wants to implement a custom
  login type of `com.example.custom_login`, where the client is expected
  to pass the fields `secret1` and `secret2`, the provider should
  implement this method and return the following dict:

> This method is the one that does the real work. If implemented, it
> will be called for each login attempt where the login type matches one
> of the keys returned by `get_supported_login_types`.
>
> It is passed the (possibly UNqualified) `user` provided by the client,
> the login type, and a dictionary of login secrets passed by the
> client.
>
> The method should return a Twisted `Deferred` object, which resolves
> to the canonical `@localpart:domain` user id if authentication is
> successful, and `None` if not.
>
> Alternatively, the `Deferred` can resolve to a `(str, func)` tuple, in
> which case the second field is a callback which will be called with
> the result from the `/login` call (including `access_token`,
> `device_id`, etc.)

  ```python
  {"com.example.custom_login": ("secret1", "secret2")}
  ```

`someprovider.check_3pid_auth`(*medium*, *address*, *password*)
* `check_auth(self, username, login_type, login_dict)`

> This method, if implemented, is called when a user attempts to
> register or log in with a third party identifier, such as email. It is
> passed the medium (ex. "email"), an address (ex.
> "<jdoe@example.com>") and the user's password.
>
> The method should return a Twisted `Deferred` object, which resolves
> to a `str` containing the user's (canonical) User ID if
> authentication was successful, and `None` if not.
>
> As with `check_auth`, the `Deferred` may alternatively resolve to a
> `(user_id, callback)` tuple.

  This method does the real work. If implemented, it
  will be called for each login attempt where the login type matches one
  of the keys returned by `get_supported_login_types`.

`someprovider.check_password`(*user_id*, *password*)

  It is passed the (possibly unqualified) `user` field provided by the client,
  the login type, and a dictionary of login secrets passed by the
  client.

> This method provides a simpler interface than
> `get_supported_login_types` and `check_auth` for password auth
> providers that just want to provide a mechanism for validating
> `m.login.password` logins.
>
> If implemented, it will be called to check logins with an
> `m.login.password` login type. It is passed a qualified
> `@localpart:domain` user id, and the password provided by the user.
>
> The method should return a Twisted `Deferred` object, which resolves
> to `True` if authentication is successful, and `False` if not.

  The method should return an `Awaitable` object, which resolves
  to the canonical `@localpart:domain` user ID if authentication is
  successful, and `None` if not.

`someprovider.on_logged_out`(*user_id*, *device_id*, *access_token*)

  Alternatively, the `Awaitable` can resolve to a `(str, func)` tuple, in
  which case the second field is a callback which will be called with
  the result from the `/login` call (including `access_token`,
  `device_id`, etc.)

> This method, if implemented, is called when a user logs out. It is
> passed the qualified user ID, the ID of the deactivated device (if
> any: access tokens are occasionally created without an associated
> device ID), and the (now deactivated) access token.
>
> It may return a Twisted `Deferred` object; the logout request will
> wait for the deferred to complete but the result is ignored.

* `check_3pid_auth(self, medium, address, password)`

  This method, if implemented, is called when a user attempts to
  register or log in with a third party identifier, such as email. It is
  passed the medium (ex. "email"), an address (ex.
  "<jdoe@example.com>") and the user's password.

  The method should return an `Awaitable` object, which resolves
  to a `str` containing the user's (canonical) User id if
  authentication was successful, and `None` if not.

  As with `check_auth`, the `Awaitable` may alternatively resolve to a
  `(user_id, callback)` tuple.

* `check_password(self, user_id, password)`

  This method provides a simpler interface than
  `get_supported_login_types` and `check_auth` for password auth
  providers that just want to provide a mechanism for validating
  `m.login.password` logins.

  If implemented, it will be called to check logins with an
  `m.login.password` login type. It is passed a qualified
  `@localpart:domain` user id, and the password provided by the user.

  The method should return an `Awaitable` object, which resolves
  to `True` if authentication is successful, and `False` if not.

* `on_logged_out(self, user_id, device_id, access_token)`

  This method, if implemented, is called when a user logs out. It is
  passed the qualified user ID, the ID of the deactivated device (if
  any: access tokens are occasionally created without an associated
  device ID), and the (now deactivated) access token.

  It may return an `Awaitable` object; the logout request will
  wait for the `Awaitable` to complete, but the result is ignored.
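To make the interface above concrete, here is a minimal sketch of a provider (not one of the modules listed earlier) that implements `parse_config`, `__init__` and the simple `check_password` hook, validating `m.login.password` logins against a static map from its config:

```python
# Minimal sketch of a password auth provider; illustration only.
class DictPasswordProvider:
    """Checks m.login.password logins against a static dict from the config."""

    @staticmethod
    def parse_config(config):
        # Sanity-check the module's config block and return it unchanged.
        if "users" not in config:
            raise Exception("'users' is required in the provider config")
        return config

    def __init__(self, config, account_handler):
        self.users = config["users"]            # e.g. {"@alice:example.com": "hunter2"}
        self.account_handler = account_handler  # synapse.module_api.ModuleApi

    async def check_password(self, user_id, password):
        # Called with a qualified @localpart:domain user ID; the coroutine is
        # the Awaitable described above, resolving to True or False.
        return self.users.get(user_id) == password
```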
@@ -188,6 +188,9 @@ to do step 2.

It is safe to kill the port script at any time and restart it.

Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.

### Using the port script

Firstly, shut down the currently running synapse server and copy its
@@ -3,7 +3,7 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/proxy) or
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root

@@ -38,6 +38,11 @@ the reverse proxy and the homeserver.
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    # For the federation port
    listen 8448 ssl default_server;
    listen [::]:8448 ssl default_server;

    server_name matrix.example.com;

    location /_matrix {

@@ -48,17 +53,6 @@ server {
        client_max_body_size 10M;
    }
}

server {
    listen 8448 ssl default_server;
    listen [::]:8448 ssl default_server;
    server_name example.com;

    location / {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
}
```

**NOTE**: Do not add a path after the port in `proxy_pass`, otherwise nginx will

@@ -145,3 +139,10 @@ client IP addresses are recorded correctly.

Having done so, you can then use `https://matrix.example.com` (instead
of `https://matrix.example.com:8448`) as the "Custom server" when
connecting to Synapse from a client.


## Health check endpoint

Synapse exposes a health check endpoint for use by reverse proxies.
Each configured HTTP listener has a `/health` endpoint which always returns
200 OK (and doesn't get logged).
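Monitoring scripts can poll this endpoint directly; a minimal sketch (the listener address is a placeholder for one of your configured HTTP listeners):

```python
# Minimal sketch: poll the /health endpoint of an HTTP listener.
import requests

resp = requests.get("http://localhost:8008/health", timeout=5)
assert resp.status_code == 200  # always 200 OK while the listener is up
```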
@@ -10,6 +10,17 @@
|
||||
# homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||
# a fresh config using Synapse by following the instructions in INSTALL.md.
|
||||
|
||||
# Configuration options that take a time period can be set using a number
|
||||
# followed by a letter. Letters have the following meanings:
|
||||
# s = second
|
||||
# m = minute
|
||||
# h = hour
|
||||
# d = day
|
||||
# w = week
|
||||
# y = year
|
||||
# For example, setting redaction_retention_period: 5m would remove redacted
|
||||
# messages from the database after 5 minutes, rather than 5 months.
|
||||
|
||||
################################################################################
|
||||
|
||||
# Configuration file for Synapse.
|
||||
@@ -102,7 +113,9 @@ pid_file: DATADIR/homeserver.pid
|
||||
#gc_thresholds: [700, 10, 10]
|
||||
|
||||
# Set the limit on the returned events in the timeline in the get
|
||||
# and sync operations. The default value is -1, means no upper limit.
|
||||
# and sync operations. The default value is 100. -1 means no upper limit.
|
||||
#
|
||||
# Uncomment the following to increase the limit to 5000.
|
||||
#
|
||||
#filter_timeline_limit: 5000
|
||||
|
||||
@@ -118,38 +131,6 @@ pid_file: DATADIR/homeserver.pid
|
||||
#
|
||||
#enable_search: false
|
||||
|
||||
# Restrict federation to the following whitelist of domains.
|
||||
# N.B. we recommend also firewalling your federation listener to limit
|
||||
# inbound federation traffic as early as possible, rather than relying
|
||||
# purely on this application-layer restriction. If not specified, the
|
||||
# default is to whitelist everything.
|
||||
#
|
||||
#federation_domain_whitelist:
|
||||
# - lon.example.com
|
||||
# - nyc.example.com
|
||||
# - syd.example.com
|
||||
|
||||
# Prevent federation requests from being sent to the following
|
||||
# blacklist IP address CIDR ranges. If this option is not specified, or
|
||||
# specified with an empty list, no ip range blacklist will be enforced.
|
||||
#
|
||||
# As of Synapse v1.4.0 this option also affects any outbound requests to identity
|
||||
# servers provided by user input.
|
||||
#
|
||||
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
|
||||
# listed here, since they correspond to unroutable addresses.)
|
||||
#
|
||||
federation_ip_range_blacklist:
|
||||
- '127.0.0.0/8'
|
||||
- '10.0.0.0/8'
|
||||
- '172.16.0.0/12'
|
||||
- '192.168.0.0/16'
|
||||
- '100.64.0.0/10'
|
||||
- '169.254.0.0/16'
|
||||
- '::1/128'
|
||||
- 'fe80::/64'
|
||||
- 'fc00::/7'
|
||||
|
||||
# List of ports that Synapse should listen on, their purpose and their
|
||||
# configuration.
|
||||
#
|
||||
@@ -178,7 +159,7 @@ federation_ip_range_blacklist:
|
||||
# names: a list of names of HTTP resources. See below for a list of
|
||||
# valid resource names.
|
||||
#
|
||||
# compress: set to true to enable HTTP comression for this resource.
|
||||
# compress: set to true to enable HTTP compression for this resource.
|
||||
#
|
||||
# additional_resources: Only valid for an 'http' listener. A map of
|
||||
# additional endpoints which should be loaded via dynamic modules.
|
||||
@@ -283,7 +264,7 @@ listeners:
|
||||
# number of monthly active users.
|
||||
#
|
||||
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
|
||||
# anabled and a limit is reached the server returns a 'ResourceLimitError'
|
||||
# enabled and a limit is reached the server returns a 'ResourceLimitError'
|
||||
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
|
||||
#
|
||||
# 'max_mau_value' is the hard limit of monthly active users above which
|
||||
@@ -344,6 +325,10 @@ limit_remote_rooms:
|
||||
#
|
||||
#complexity_error: "This room is too complex."
|
||||
|
||||
# allow server admins to join complex rooms. Default is false.
|
||||
#
|
||||
#admins_can_join: true
|
||||
|
||||
# Whether to require a user to be in the room to add an alias to it.
|
||||
# Defaults to 'true'.
|
||||
#
|
||||
@@ -608,6 +593,39 @@ acme:
|
||||
|
||||
|
||||
|
||||
# Restrict federation to the following whitelist of domains.
|
||||
# N.B. we recommend also firewalling your federation listener to limit
|
||||
# inbound federation traffic as early as possible, rather than relying
|
||||
# purely on this application-layer restriction. If not specified, the
|
||||
# default is to whitelist everything.
|
||||
#
|
||||
#federation_domain_whitelist:
|
||||
# - lon.example.com
|
||||
# - nyc.example.com
|
||||
# - syd.example.com
|
||||
|
||||
# Prevent federation requests from being sent to the following
|
||||
# blacklist IP address CIDR ranges. If this option is not specified, or
|
||||
# specified with an empty list, no ip range blacklist will be enforced.
|
||||
#
|
||||
# As of Synapse v1.4.0 this option also affects any outbound requests to identity
|
||||
# servers provided by user input.
|
||||
#
|
||||
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
|
||||
# listed here, since they correspond to unroutable addresses.)
|
||||
#
|
||||
federation_ip_range_blacklist:
|
||||
- '127.0.0.0/8'
|
||||
- '10.0.0.0/8'
|
||||
- '172.16.0.0/12'
|
||||
- '192.168.0.0/16'
|
||||
- '100.64.0.0/10'
|
||||
- '169.254.0.0/16'
|
||||
- '::1/128'
|
||||
- 'fe80::/64'
|
||||
- 'fc00::/7'
|
||||
|
||||
|
||||
## Caching ##
|
||||
|
||||
# Caching can be configured through the following options.
|
||||
@@ -682,7 +700,7 @@ caches:
|
||||
#database:
|
||||
# name: psycopg2
|
||||
# args:
|
||||
# user: synapse
|
||||
# user: synapse_user
|
||||
# password: secretpassword
|
||||
# database: synapse
|
||||
# host: localhost
|
||||
@@ -728,6 +746,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
||||
# - one for ratelimiting redactions by room admins. If this is not explicitly
|
||||
# set then it uses the same ratelimiting as per rc_message. This is useful
|
||||
# to allow room admins to deal with abuse quickly.
|
||||
# - two for ratelimiting number of rooms a user can join, "local" for when
|
||||
# users are joining rooms the server is already in (this is cheap) vs
|
||||
# "remote" for when users are trying to join rooms not on the server (which
|
||||
# can be more expensive)
|
||||
#
|
||||
# The defaults are as shown below.
|
||||
#
|
||||
@@ -753,6 +775,14 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
||||
#rc_admin_redaction:
|
||||
# per_second: 1
|
||||
# burst_count: 50
|
||||
#
|
||||
#rc_joins:
|
||||
# local:
|
||||
# per_second: 0.1
|
||||
# burst_count: 3
|
||||
# remote:
|
||||
# per_second: 0.01
|
||||
# burst_count: 3
|
||||
|
||||
|
||||
# Ratelimiting settings for incoming federation
|
||||
@@ -1142,24 +1172,6 @@ account_validity:
|
||||
#
|
||||
#default_identity_server: https://matrix.org
|
||||
|
||||
# The list of identity servers trusted to verify third party
|
||||
# identifiers by this server.
|
||||
#
|
||||
# Also defines the ID server which will be called when an account is
|
||||
# deactivated (one will be picked arbitrarily).
|
||||
#
|
||||
# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
|
||||
# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
|
||||
# background migration script, informing itself that the identity server all of its
|
||||
# 3PIDs have been bound to is likely one of the below.
|
||||
#
|
||||
# As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
|
||||
# it is now solely used for the purposes of the background migration script, and can be
|
||||
# removed once it has run.
|
||||
#trusted_third_party_id_servers:
|
||||
# - matrix.org
|
||||
# - vector.im
|
||||
|
||||
# Handle threepid (email/phone etc) registration and password resets through a set of
|
||||
# *trusted* identity servers. Note that this allows the configured identity server to
|
||||
# reset passwords for accounts!
|
||||
@@ -1210,7 +1222,11 @@ account_threepid_delegates:
|
||||
#enable_3pid_changes: false
|
||||
|
||||
# Users who register on this homeserver will automatically be joined
|
||||
# to these rooms
|
||||
# to these rooms.
|
||||
#
|
||||
# By default, any room aliases included in this list will be created
|
||||
# as a publicly joinable room when the first user registers for the
|
||||
# homeserver. This behaviour can be customised with the settings below.
|
||||
#
|
||||
#auto_join_rooms:
|
||||
# - "#example:example.com"
|
||||
@@ -1218,10 +1234,69 @@ account_threepid_delegates:
# Where auto_join_rooms are specified, setting this flag ensures that
# the rooms exist by creating them when the first user on the
# homeserver registers.
#
# By default the auto-created rooms are publicly joinable from any federated
# server. Use the autocreate_auto_join_rooms_federated and
# autocreate_auto_join_room_preset settings below to customise this behaviour.
#
# Setting to false means that if the rooms are not manually created,
# users cannot be auto-joined since they do not exist.
#
#autocreate_auto_join_rooms: true
# Defaults to true. Uncomment the following line to disable automatically
# creating auto-join rooms.
#
#autocreate_auto_join_rooms: false

# Whether the auto_join_rooms that are auto-created are available via
# federation. Only has an effect if autocreate_auto_join_rooms is true.
#
# Note that whether a room is federated cannot be modified after
# creation.
#
# Defaults to true: the room will be joinable from other servers.
# Uncomment the following to prevent users from other homeservers from
# joining these rooms.
#
#autocreate_auto_join_rooms_federated: false

# The room preset to use when auto-creating one of auto_join_rooms. Only has an
# effect if autocreate_auto_join_rooms is true.
#
# This can be one of "public_chat", "private_chat", or "trusted_private_chat".
# If a value of "private_chat" or "trusted_private_chat" is used then
# auto_join_mxid_localpart must also be configured.
#
# Defaults to "public_chat", meaning that the room is joinable by anyone, including
# federated servers if autocreate_auto_join_rooms_federated is true (the default).
# Uncomment the following to require an invitation to join these rooms.
#
#autocreate_auto_join_room_preset: private_chat

# The local part of the user id which is used to create auto_join_rooms if
# autocreate_auto_join_rooms is true. If this is not provided then the
# initial user account that registers will be used to create the rooms.
#
# The user id is also used to invite new users to any auto-join rooms which
# are set to invite-only.
#
# It *must* be configured if autocreate_auto_join_room_preset is set to
# "private_chat" or "trusted_private_chat".
#
# Note that this must be specified in order for new users to be correctly
# invited to any auto-join rooms which have been set to invite-only (either
# at the time of creation or subsequently).
#
# Note that, if the room already exists, this user must be joined and
# have the appropriate permissions to invite new members.
#
#auto_join_mxid_localpart: system

# When auto_join_rooms is specified, setting this flag to false prevents
# guest accounts from being automatically joined to the rooms.
#
# Defaults to true.
#
#auto_join_rooms_for_guests: false
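
# (Editor's sketch: the settings above combine like this for an invite-only
# welcome room created and managed by a dedicated "system" user. The room
# alias and localpart are illustrative.)
#
#auto_join_rooms:
#  - "#welcome:example.com"
#autocreate_auto_join_rooms: true
#autocreate_auto_join_rooms_federated: false
#autocreate_auto_join_room_preset: private_chat
#auto_join_mxid_localpart: system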


## Metrics ###

@@ -1379,6 +1454,8 @@ trusted_key_servers:
#key_server_signing_keys_path: "key_server_signing_keys.key"


## Single sign-on integration ##

# Enable SAML2 for registration and login. Uses pysaml2.
#
# At least one of `sp_config` or `config_path` must be set in this section to
@@ -1445,7 +1522,7 @@ saml2_config:

# The lifetime of a SAML session. This defines how long a user has to
# complete the authentication process, if allow_unsolicited is unset.
# The default is 5 minutes.
# The default is 15 minutes.
#
#saml_session_lifetime: 5m

@@ -1500,6 +1577,17 @@ saml2_config:
#
#grandfathered_mxid_source_attribute: upn

# It is possible to configure Synapse to only allow logins if SAML attributes
# match particular values. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted.
#
#attribute_requirements:
#  - attribute: userGroup
#    value: "staff"
#  - attribute: department
#    value: "sales"

# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
@@ -1512,7 +1600,13 @@ saml2_config:
# * HTML page to display to users if something goes wrong during the
#   authentication process: 'saml_error.html'.
#
# This template doesn't currently need any variable to render.
# When rendering, this template is given the following variables:
#   * code: an HTML error code corresponding to the error that is being
#     returned (typically 400 or 500)
#
#   * msg: a textual message describing the error.
#
# The variables will automatically be HTML-escaped.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
@@ -1520,92 +1614,119 @@ saml2_config:
#template_dir: "res/templates"


# Enable OpenID Connect for registration and login. Uses authlib.
# OpenID Connect integration. The following settings can be used to make Synapse
# use an OpenID Connect Provider for authentication, instead of its internal
# password database.
#
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md.
#
oidc_config:
  # enable OpenID Connect. Defaults to false.
  #
  #enabled: true
  # Uncomment the following to enable authorization against an OpenID Connect
  # server. Defaults to false.
  #
  #enabled: true

  # use the OIDC discovery mechanism to discover endpoints. Defaults to true.
  #
  #discover: true
  # Uncomment the following to disable use of the OIDC discovery mechanism to
  # discover endpoints. Defaults to true.
  #
  #discover: false

  # the OIDC issuer. Used to validate tokens and discover the providers endpoints. Required.
  #
  #issuer: "https://accounts.example.com/"
  # the OIDC issuer. Used to validate tokens and (if discovery is enabled) to
  # discover the provider's endpoints.
  #
  # Required if 'enabled' is true.
  #
  #issuer: "https://accounts.example.com/"

  # oauth2 client id to use. Required.
  #
  #client_id: "provided-by-your-issuer"
  # oauth2 client id to use.
  #
  # Required if 'enabled' is true.
  #
  #client_id: "provided-by-your-issuer"

  # oauth2 client secret to use. Required.
  #
  #client_secret: "provided-by-your-issuer"
  # oauth2 client secret to use.
  #
  # Required if 'enabled' is true.
  #
  #client_secret: "provided-by-your-issuer"

  # auth method to use when exchanging the token.
  # Valid values are "client_secret_basic" (default), "client_secret_post" and "none".
  #
  #client_auth_method: "client_secret_basic"
  # auth method to use when exchanging the token.
  # Valid values are 'client_secret_basic' (default), 'client_secret_post' and
  # 'none'.
  #
  #client_auth_method: client_secret_post

  # list of scopes to ask. This should include the "openid" scope. Defaults to ["openid"].
  #
  #scopes: ["openid"]
  # list of scopes to request. This should normally include the "openid" scope.
  # Defaults to ["openid"].
  #
  #scopes: ["openid", "profile"]

  # the oauth2 authorization endpoint. Required if provider discovery is disabled.
  #
  #authorization_endpoint: "https://accounts.example.com/oauth2/auth"
  # the oauth2 authorization endpoint. Required if provider discovery is disabled.
  #
  #authorization_endpoint: "https://accounts.example.com/oauth2/auth"

  # the oauth2 token endpoint. Required if provider discovery is disabled.
  #
  #token_endpoint: "https://accounts.example.com/oauth2/token"
  # the oauth2 token endpoint. Required if provider discovery is disabled.
  #
  #token_endpoint: "https://accounts.example.com/oauth2/token"

  # the OIDC userinfo endpoint. Required if discovery is disabled and the "openid" scope is not asked.
  #
  #userinfo_endpoint: "https://accounts.example.com/userinfo"
  # the OIDC userinfo endpoint. Required if discovery is disabled and the
  # "openid" scope is not requested.
  #
  #userinfo_endpoint: "https://accounts.example.com/userinfo"

  # URI where to fetch the JWKS. Required if discovery is disabled and the "openid" scope is used.
  #
  #jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
  # URI where to fetch the JWKS. Required if discovery is disabled and the
  # "openid" scope is used.
  #
  #jwks_uri: "https://accounts.example.com/.well-known/jwks.json"

  # skip metadata verification. Defaults to false.
  # Use this if you are connecting to a provider that is not OpenID Connect compliant.
  # Avoid this in production.
  #
  #skip_verification: false
  # Uncomment to skip metadata verification. Defaults to false.
  #
  # Use this if you are connecting to a provider that is not OpenID Connect
  # compliant.
  # Avoid this in production.
  #
  #skip_verification: true


  # An external module can be provided here as a custom solution to mapping
  # attributes returned from an OIDC provider onto a matrix user.
  # An external module can be provided here as a custom solution to mapping
  # attributes returned from an OIDC provider onto a matrix user.
  #
  user_mapping_provider:
    # The custom module's class. Uncomment to use a custom module.
    # Default is 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
    #
  user_mapping_provider:
    # The custom module's class. Uncomment to use a custom module.
    # Default is 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
    # See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
    # for information on implementing a custom mapping provider.
    #
    #module: mapping_provider.OidcMappingProvider

    # Custom configuration values for the module. This section will be passed as
    # a Python dictionary to the user mapping provider module's `parse_config`
    # method.
    #
    # The examples below are intended for the default provider: they should be
    # changed if using a custom provider.
    #
    config:
      # name of the claim containing a unique identifier for the user.
      # Defaults to `sub`, which OpenID Connect compliant providers should provide.
      #
      #module: mapping_provider.OidcMappingProvider
      #subject_claim: "sub"

      # Custom configuration values for the module. Below options are intended
      # for the built-in provider, they should be changed if using a custom
      # module. This section will be passed as a Python dictionary to the
      # module's `parse_config` method.
      # Jinja2 template for the localpart of the MXID.
      #
      # Below is the config of the default mapping provider, based on Jinja2
      # templates. Those templates are used to render user attributes, where the
      # userinfo object is available through the `user` variable.
      # When rendering, this template is given the following variables:
      #   * user: The claims returned by the UserInfo Endpoint and/or in the ID
      #     Token
      #
    config:
      # name of the claim containing a unique identifier for the user.
      # Defaults to `sub`, which OpenID Connect compliant providers should provide.
      #
      #subject_claim: "sub"
      # This must be configured if using the default mapping provider.
      #
      localpart_template: "{{ user.preferred_username }}"

      # Jinja2 template for the localpart of the MXID
      #
      localpart_template: "{{ user.preferred_username }}"

      # Jinja2 template for the display name to set on first login. Optional.
      #
      #display_name_template: "{{ user.given_name }} {{ user.last_name }}"
      # Jinja2 template for the display name to set on first login.
      #
      # If unset, no displayname will be set.
      #
      #display_name_template: "{{ user.given_name }} {{ user.last_name }}"

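# (Editor's sketch: a minimal discovery-based OIDC setup assembled from the
# options above. Issuer, client id and secret are placeholders that your
# provider would supply.)
#
#oidc_config:
#  enabled: true
#  issuer: "https://accounts.example.com/"
#  client_id: "provided-by-your-issuer"
#  client_secret: "provided-by-your-issuer"
#  scopes: ["openid", "profile"]
#  user_mapping_provider:
#    config:
#      localpart_template: "{{ user.preferred_username }}"
#      display_name_template: "{{ user.given_name }} {{ user.last_name }}"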
@@ -1620,7 +1741,8 @@ oidc_config:
#    # name: value


# Additional settings to use with single-sign on systems such as SAML2 and CAS.
# Additional settings to use with single-sign on systems such as OpenID Connect,
# SAML2 and CAS.
#
sso:
  # A list of client URLs which are whitelisted so that the user does not
@@ -1705,12 +1827,60 @@ sso:
#template_dir: "res/templates"


# The JWT needs to contain a globally unique "sub" (subject) claim.
# JSON web token integration. The following settings can be used to make
# Synapse use JSON web tokens for authentication, instead of its internal
# password database.
#
# Each JSON Web Token needs to contain a "sub" (subject) claim, which is
# used as the localpart of the mxid.
#
# Additionally, the expiration time ("exp"), not before time ("nbf"),
# and issued at ("iat") claims are validated if present.
#
# Note that this is a non-standard login type and client support is
# expected to be non-existent.
#
# See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
#
#jwt_config:
#   enabled: true
#   secret: "a secret"
#   algorithm: "HS256"
  # Uncomment the following to enable authorization using JSON web
  # tokens. Defaults to false.
  #
  #enabled: true

  # This is either the private shared secret or the public key used to
  # decode the contents of the JSON web token.
  #
  # Required if 'enabled' is true.
  #
  #secret: "provided-by-your-issuer"

  # The algorithm used to sign the JSON web token.
  #
  # Supported algorithms are listed at
  # https://pyjwt.readthedocs.io/en/latest/algorithms.html
  #
  # Required if 'enabled' is true.
  #
  #algorithm: "provided-by-your-issuer"

  # The issuer to validate the "iss" claim against.
  #
  # Optional, if provided the "iss" claim will be required and
  # validated for all JSON web tokens.
  #
  #issuer: "provided-by-your-issuer"

  # A list of audiences to validate the "aud" claim against.
  #
  # Optional, if provided the "aud" claim will be required and
  # validated for all JSON web tokens.
  #
  # Note that if the "aud" claim is included in a JSON web token then
  # validation will fail without configuring audiences.
  #
  #audiences:
  #  - "provided-by-your-issuer"
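
# (Editor's sketch: a complete HS256-based jwt_config assembled from the
# options above. Secret, issuer and audience values are placeholders.)
#
#jwt_config:
#  enabled: true
#  secret: "provided-by-your-issuer"
#  algorithm: "HS256"
#  issuer: "https://issuer.example.com/"
#  audiences:
#    - "synapse.example.com"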
password_config:
@@ -1801,8 +1971,8 @@ email:
#
#notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"

# app_name defines the default value for '%(app)s' in notif_from. It
# defaults to 'Matrix'.
# app_name defines the default value for '%(app)s' in notif_from and email
# subjects. It defaults to 'Matrix'.
#
#app_name: my_branded_matrix_server

@@ -1832,9 +2002,7 @@ email:
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
# Do not uncomment this setting unless you want to customise the templates.
#
# Synapse will look for the following templates in this directory:
#
@@ -1871,6 +2039,73 @@ email:
#
#template_dir: "res/templates"

# Subjects to use when sending emails from Synapse.
#
# The placeholder '%(app)s' will be replaced with the value of the 'app_name'
# setting above, or by a value dictated by the Matrix client application.
#
# If a subject isn't overridden in this configuration file, the value used as
# its example will be used.
#
#subjects:

# Subjects for notification emails.
#
# On top of the '%(app)s' placeholder, these can use the following
# placeholders:
#
#   * '%(person)s', which will be replaced by the display name of the user(s)
#     that sent the message(s), e.g. "Alice and Bob".
#   * '%(room)s', which will be replaced by the name of the room the
#     message(s) have been sent to, e.g. "My super room".
#
# See the example provided for each setting to see which placeholder can be
# used and how to use them.
#
# Subject to use to notify about one message from one or more user(s) in a
# room which has a name.
#message_from_person_in_room: "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."
#
# Subject to use to notify about one message from one or more user(s) in a
# room which doesn't have a name.
#message_from_person: "[%(app)s] You have a message on %(app)s from %(person)s..."
#
# Subject to use to notify about multiple messages from one or more users in
# a room which doesn't have a name.
#messages_from_person: "[%(app)s] You have messages on %(app)s from %(person)s..."
#
# Subject to use to notify about multiple messages in a room which has a
# name.
#messages_in_room: "[%(app)s] You have messages on %(app)s in the %(room)s room..."
#
# Subject to use to notify about multiple messages in multiple rooms.
#messages_in_room_and_others: "[%(app)s] You have messages on %(app)s in the %(room)s room and others..."
#
# Subject to use to notify about multiple messages from multiple persons in
# multiple rooms. This is similar to the setting above except it's used when
# the room in which the notification was triggered has no name.
#messages_from_person_and_others: "[%(app)s] You have messages on %(app)s from %(person)s and others..."
#
# Subject to use to notify about an invite to a room which has a name.
#invite_from_person_to_room: "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."
#
# Subject to use to notify about an invite to a room which doesn't have a
# name.
#invite_from_person: "[%(app)s] %(person)s has invited you to chat on %(app)s..."

# Subject for emails related to account administration.
#
# On top of the '%(app)s' placeholder, these can also use the
# '%(server_name)s' placeholder, which will be replaced by the value of the
# 'server_name' setting in your Synapse configuration.
#
# Subject to use when sending a password reset email.
#password_reset: "[%(server_name)s] Password reset"
#
# Subject to use when sending a verification email to assert an address's
# ownership.
#email_validation: "[%(server_name)s] Validate your email"
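
# (Editor's sketch: overriding a couple of the subjects above for a branded
# deployment. The wording is illustrative.)
#
#subjects:
#  message_from_person_in_room: "[%(app)s] New message from %(person)s in %(room)s"
#  password_reset: "[%(server_name)s] Reset your password"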
# Password providers allow homeserver administrators to integrate
# their Synapse installation with existing authentication methods
@@ -1930,6 +2165,26 @@ spam_checker:
#  example_stop_events_from: ['@bad:example.com']


## Rooms ##

# Controls whether locally-created rooms should be end-to-end encrypted by
# default.
#
# Possible options are "all", "invite", and "off". They are defined as:
#
# * "all": any locally-created room
# * "invite": any room created with the "private_chat" or "trusted_private_chat"
#   room creation presets
# * "off": this option will take no effect
#
# The default value is "off".
#
# Note that this option will only affect rooms created after it is set. It
# will also not affect rooms created by other servers.
#
#encryption_enabled_by_default_for_room_type: invite


# Uncomment to allow non-server-admin users to create groups on this server
#
#enable_group_creation: true
@@ -2161,3 +2416,57 @@ opentracing:
#
#    logging:
#      false


## Workers ##

# Disables sending of outbound federation transactions on the main process.
# Uncomment if using a federation sender worker.
#
#send_federation: false

# It is possible to run multiple federation sender workers, in which case the
# work is balanced across them.
#
# This configuration must be shared between all federation sender workers, and if
# changed all federation sender workers must be stopped at the same time and then
# started, to ensure that all instances are running with the same config (otherwise
# events may be dropped).
#
#federation_sender_instances:
#  - federation_sender1

# When using workers this should be a map from `worker_name` to the
# HTTP replication listener of the worker, if configured.
#
#instance_map:
#  worker1:
#    host: localhost
#    port: 8034

# Experimental: When using workers you can define which workers should
# handle event persistence and typing notifications. Any worker
# specified here must also be in the `instance_map`.
#
#stream_writers:
#  events: worker1
#  typing: worker1


# Configuration for Redis when using workers. This *must* be enabled when
# using workers (unless using old style direct TCP configuration).
#
redis:
  # Uncomment the below to enable Redis support.
  #
  #enabled: true

  # Optional host and port to use to connect to redis. Defaults to
  # localhost and 6379
  #
  #host: localhost
  #port: 6379

  # Optional password if configured on the Redis instance
  #
  #password: <secret_password>
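
# (Editor's sketch: a fully enabled Redis section combining the options
# above. Host, port and password are illustrative.)
#
#redis:
#  enabled: true
#  host: localhost
#  port: 6379
#  password: your-redis-password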
@@ -11,24 +11,33 @@ formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  file:
    class: logging.handlers.RotatingFileHandler
    class: logging.handlers.TimedRotatingFileHandler
    formatter: precise
    filename: /var/log/matrix-synapse/homeserver.log
    maxBytes: 104857600
    backupCount: 10
    filters: [context]
    when: midnight
    backupCount: 3  # Does not include the current log file.
    encoding: utf8

  # Default to buffering writes to log file for efficiency. This means that
  # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
  # logs will still be flushed immediately.
  buffer:
    class: logging.handlers.MemoryHandler
    target: file
    # The capacity is the number of log lines that are buffered before
    # being written to disk. Increasing this will lead to better
    # performance, at the expense of it taking longer for log lines to
    # be written to disk.
    capacity: 10
    flushLevel: 30  # Flush for WARNING logs as well

  # A handler that writes logs to stderr. Unused by default, but can be used
  # instead of "buffer" and "file" in the logger handlers.
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse.storage.SQL:
@@ -36,8 +45,23 @@ loggers:
    # information such as access tokens.
    level: INFO

  twisted:
    # We send the twisted logging directly to the file handler,
    # to work around https://github.com/matrix-org/synapse/issues/3471
    # when using "buffer" logger. Use "console" to log to stderr instead.
    handlers: [file]
    propagate: false

root:
  level: INFO
  handlers: [file, console]

  # Write logs to the `buffer` handler, which will buffer them together in memory,
  # then write them to a file.
  #
  # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
  # also need to update the configuration for the `twisted` logger above, in
  # this case.)
  #
  handlers: [buffer]

disable_existing_loggers: false
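
# (Editor's sketch: the console-only variant the comments above describe.
# Both the twisted logger and the root logger switch to the `console`
# handler.)
#
#loggers:
#  twisted:
#    handlers: [console]
#    propagate: false
#
#root:
#  level: INFO
#  handlers: [console]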
@@ -138,6 +138,8 @@ A custom mapping provider must specify the following methods:
* `mxid_localpart` - Required. The mxid localpart of the new user.
* `displayname` - The displayname of the new user. If not provided, will default to
  the value of `mxid_localpart`.
* `emails` - A list of emails for the new user. If not provided, will
  default to an empty list.

### Default SAML Mapping Provider

docs/synctl_workers.md (new file, 32 lines)
@@ -0,0 +1,32 @@
### Using synctl with workers

If you want to use `synctl` to manage your synapse processes, you will need to
create an additional configuration file for the main synapse process. That
configuration should look like this:

```yaml
worker_app: synapse.app.homeserver
```

Additionally, each worker app must be configured with the name of a "pid file",
to which it will write its process ID when it starts. For example, for a
synchrotron, you might write:

```yaml
worker_pid_file: /home/matrix/synapse/worker1.pid
```

Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.:

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl:

    synctl -w $CONFIG/workers/worker1.yaml restart
@@ -1,7 +1,7 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1

worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093

worker_listeners:

@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql)
solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.

docs/workers.md (513 lines)
@@ -1,10 +1,10 @@
# Scaling synapse via workers

For small instances it is recommended to run Synapse in monolith mode (the
default). For larger instances where performance is a concern it can be helpful
to split out functionality into multiple separate python processes. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.
For small instances it is recommended to run Synapse in the default monolith mode.
For larger instances where performance is a concern it can be helpful to split
out functionality into multiple separate python processes. These processes are
called 'workers', and are (eventually) intended to scale horizontally
independently.

Synapse's worker support is under active development and subject to change as
we attempt to rapidly scale ever larger Synapse instances. However we are
@@ -16,69 +16,123 @@ workers only work with PostgreSQL-based Synapse deployments. SQLite should only
be used for demo purposes and any admin considering workers should already be
running PostgreSQL.

## Master/worker communication
## Main process/worker communication

The workers communicate with the master process via a Synapse-specific protocol
called 'replication' (analogous to MySQL- or Postgres-style database
replication) which feeds a stream of relevant data from the master to the
workers so they can be kept in sync with the master process and database state.
The processes communicate with each other via a Synapse-specific protocol called
'replication' (analogous to MySQL- or Postgres-style database replication) which
feeds streams of newly written data between processes so they can be kept in
sync with the database state.

Additionally, workers may make HTTP requests to the master, to send information
in the other direction. Typically this is used for operations which need to
wait for a reply - such as sending an event.
When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.

## Configuration
Redis support was added in v1.13.0, and it became the recommended method in
v1.18.0. It replaced the old direct TCP connections (which are deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and will be a prerequisite for upcoming
performance improvements.

See the [Architectural diagram](#architectural-diagram) section at the end for
a visualisation of what this looks like.


## Setting up workers

A Redis server is required to manage the communication between the processes.
The Redis server should be installed following the normal procedure for your
distribution (e.g. `apt install redis-server` on Debian). It is safe to use an
existing Redis deployment if you have one.

Once installed, check that Redis is running and accessible from the host running
Synapse, for example by executing `echo PING | nc -q1 localhost 6379` and seeing
a response of `+PONG`.

The appropriate dependencies must also be installed for Synapse. If using a
virtualenv, these can be installed with:

```sh
pip install matrix-synapse[redis]
```

Note that these dependencies are included when synapse is installed with `pip
install matrix-synapse[all]`. They are also included in the debian packages from
`matrix.org` and in the docker images at
https://hub.docker.com/r/matrixdotorg/synapse/.

To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. Note that this includes
requests made to the federation port. See [reverse_proxy.md](reverse_proxy.md)
for information on setting up a reverse proxy.
the correct worker, or to the main synapse instance. See
[reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
proxy.

When using workers, each worker process has its own configuration file which
contains settings specific to that worker, such as the HTTP listener that it
provides (if any), logging configuration, etc.

Normally, the worker processes are configured to read from a shared
configuration file as well as the worker-specific configuration files. This
makes it easier to keep common configuration settings synchronised across all
the processes.

The main process is somewhat special in this respect: it does not normally
need its own configuration file and can take all of its configuration from the
shared configuration file.


### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. For example:

To enable workers, you need to add *two* replication listeners to the
main Synapse configuration file (`homeserver.yaml`). For example:

```yaml
# extend the existing `listeners` section. This defines the ports that the
# main process will listen on.
listeners:
  # The TCP replication port
  - port: 9092
    bind_address: '127.0.0.1'
    type: replication

  # The HTTP replication port
  - port: 9093
    bind_address: '127.0.0.1'
    type: http
    resources:
      - names: [replication]

redis:
  enabled: true
```

Under **no circumstances** should these replication API listeners be exposed to
the public internet; they have no authentication and are unencrypted.
See the sample config for the full documentation of each option.

You should then create a set of configs for the various worker processes. Each
worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that
worker, e.g. the HTTP listener that it provides (if any); logging
configuration; etc. You should minimise the number of overrides though to
maintain a usable config.
Under **no circumstances** should the replication listener be exposed to the
public internet; it has no authentication and is unencrypted.


### Worker configuration

In the config file for each worker, you must specify the type of worker
application (`worker_app`). The currently available worker applications are
listed below. You must also specify the replication endpoints that it should
talk to on the main synapse process. `worker_replication_host` should specify
the host of the main synapse, `worker_replication_port` should point to the TCP
replication listener port and `worker_replication_http_port` should point to
the HTTP replication port.
application (`worker_app`), and you should specify a unique name for the worker
(`worker_name`). The currently available worker applications are listed below.
You must also specify the HTTP replication endpoint that it should talk to on
the main synapse process. `worker_replication_host` should specify the host of
the main synapse and `worker_replication_http_port` should point to the HTTP
replication port. If the worker will handle HTTP requests then the
`worker_listeners` option should be set with an `http` listener, in the same way
as the `listeners` option in the shared config.

For example:

```yaml
worker_app: synapse.app.synchrotron
worker_app: synapse.app.generic_worker
worker_name: worker1

# The replication listener on the synapse to talk to.
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093

worker_listeners:
@@ -87,142 +141,43 @@ worker_listeners:
   resources:
     - names:
       - client
       - federation

worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
```

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP `/sync` endpoint on port 8083 separately from the `/sync` endpoint provided
by the main synapse.
...is a full configuration for a generic worker instance, which will expose a
plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
`/sync`, which are listed below.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).


### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either
`synctl` or your distribution's preferred service manager such as `systemd`. We
recommend the use of `systemd` where available: for information on setting up
`systemd` to start synapse workers, see
[systemd-with-workers](systemd-with-workers). To use `synctl`, see below.
[systemd-with-workers](systemd-with-workers). To use `synctl`, see
[synctl_workers.md](synctl_workers.md).

### **Experimental** support for replication over redis

As of Synapse v1.13.0, it is possible to configure Synapse to send replication
via a [Redis pub/sub channel](https://redis.io/topics/pubsub). This is an
alternative to direct TCP connections to the master: rather than all the
workers connecting to the master, all the workers and the master connect to
Redis, which relays replication commands between processes. This can give a
significant cpu saving on the master and will be a prerequisite for upcoming
performance improvements.

Note that this support is currently experimental; you may experience lost
messages and similar problems! It is strongly recommended that admins setting
up workers for the first time use direct TCP replication as above.

To configure Synapse to use Redis:

1. Install Redis following the normal procedure for your distribution - for
   example, on Debian, `apt install redis-server`. (It is safe to use an
   existing Redis deployment if you have one: we use a pub/sub stream named
   according to the `server_name` of your synapse server.)
2. Check Redis is running and accessible: you should be able to `echo PING | nc -q1
   localhost 6379` and get a response of `+PONG`.
3. Install the python prerequisites. If you installed synapse into a
   virtualenv, this can be done with:
   ```sh
   pip install matrix-synapse[redis]
   ```
   The debian packages from matrix.org already include the required
   dependencies.
4. Add config to the shared configuration (`homeserver.yaml`):
   ```yaml
   redis:
     enabled: true
   ```
   Optional parameters which can go alongside `enabled` are `host`, `port`,
   `password`. Normally none of these are required.
5. Restart master and all workers.

Once redis replication is in use, `worker_replication_port` is redundant and
can be removed from the worker configuration files. Similarly, the
configuration for the `listener` for the TCP replication port can be removed
from the main configuration file. Note that the HTTP replication port is
still required.

### Using synctl

If you want to use `synctl` to manage your synapse processes, you will need to
create an additional configuration file for the master synapse process. That
configuration should look like this:

```yaml
worker_app: synapse.app.homeserver
```

Additionally, each worker app must be configured with the name of a "pid file",
to which it will write its process ID when it starts. For example, for a
synchrotron, you might write:

```yaml
worker_pid_file: /home/matrix/synapse/synchrotron.pid
```

Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.:

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl:

    synctl -w $CONFIG/workers/synchrotron.yaml restart

## Available worker applications

### `synapse.app.pusher`
### `synapse.app.generic_worker`

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.synchrotron`

The synchrotron handles `sync` requests from clients. In particular, it can
handle REST endpoints matching the following regular expressions:
This worker can handle API requests matching the following regular
expressions:

    # Sync requests
    ^/_matrix/client/(v2_alpha|r0)/sync$
    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
    ^/_matrix/client/(api/v1|r0)/initialSync$
    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$

The above endpoints should all be routed to the synchrotron worker by the
reverse-proxy configuration.

It is possible to run multiple instances of the synchrotron to scale
horizontally. In this case the reverse-proxy should be configured to
load-balance across the instances, though it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting
a userid from the access token is currently left as an exercise for the reader.

### `synapse.app.appservice`

Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set `notify_appservices: False` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.federation_reader`

Handles a subset of federation endpoints. In particular, it can handle REST
endpoints matching the following regular expressions:

    # Federation requests
    ^/_matrix/federation/v1/event/
    ^/_matrix/federation/v1/state/
    ^/_matrix/federation/v1/state_ids/
@@ -242,40 +197,145 @@ endpoints matching the following regular expressions:
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/user/devices/
    ^/_matrix/federation/v1/send/
    ^/_matrix/federation/v1/get_groups_publicised$
    ^/_matrix/key/v2/query

    # Inbound federation transaction request
    ^/_matrix/federation/v1/send/

    # Client API requests
    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
    ^/_matrix/client/versions$
    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/

    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|unstable)/login$
    ^/_matrix/client/(r0|unstable)/register$
    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$

    # Event sending requests
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/


Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/federation/v1/groups/

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
for the room are in flight:

The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$

Note that `federation` must be added to the listener resources in the worker config:
Note that an HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.


#### Load balancing

It is possible to run multiple instances of this worker app, with incoming requests
being load-balanced between them by the reverse-proxy. However, different endpoints
have different characteristics and so admins
may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting a
user ID from the access token or `Authorization` header is currently left as an
exercise for the reader. Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`): requests without a `since` parameter are "initial syncs",
which happen when a user logs in on a new device and can be *very* resource
intensive, so isolating these requests will stop them from interfering with
other users' ongoing syncs.

Federation and client requests can be balanced via simple round robin.

The inbound federation transaction request `^/_matrix/federation/v1/send/`
should be balanced by source IP so that transactions from the same remote server
go to the same process.

Registration/login requests can be handled separately purely to help ensure that
unexpected load doesn't affect new logins and sign ups.

Finally, event sending requests can be balanced by the room ID in the URI (or
the full URI, or even just round robin); the room ID is the path component after
`/rooms/`. If there is a large bridge connected that is sending or may send lots
of events, then a dedicated set of workers can be provisioned to limit the
effects of bursts of events from that bridge on events sent by normal users.

#### Stream writers

Additionally, there is *experimental* support for moving writing of specific
streams (such as events) off of the main process to a particular worker. (This
is only supported with Redis-based replication.)

Currently supported streams are `events` and `typing`.

To enable this, the worker must have an HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. For example to
move event persistence off to a dedicated worker, the shared configuration would
include:

```yaml
worker_app: synapse.app.federation_reader
...
worker_listeners:
 - type: http
   port: <port>
   resources:
     - names:
       - federation
instance_map:
    event_persister1:
        host: localhost
        port: 8034

stream_writers:
    events: event_persister1
```


### `synapse.app.pusher`

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending push notifications.

Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.appservice`

Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set `notify_appservices: False` in the
shared configuration file to stop the main synapse sending appservice notifications.

Note this worker cannot be load-balanced: only one instance should be active.


### `synapse.app.federation_sender`

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.

Note this worker cannot be load-balanced: only one instance should be active.
If running multiple federation senders then you must list each
instance in the `federation_sender_instances` option by their `worker_name`.
All instances must be stopped and started when adding or removing instances.
For example:

```yaml
federation_sender_instances:
  - federation_sender1
  - federation_sender2
```

### `synapse.app.media_repository`

@@ -307,47 +367,12 @@ expose the `media` resource. For example:
      - media
```

Note this worker cannot be load-balanced: only one instance should be active.
Note that if running multiple media repositories they must be on the same server
and you must configure a single instance to run the background tasks, e.g.:

### `synapse.app.client_reader`

Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions:

    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
    ^/_matrix/client/(api/v1|r0|unstable)/login$
    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
    ^/_matrix/client/versions$
    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/

Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/account_data/
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/rooms/[^/]*/account_data/

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:

    ^/_matrix/client/(r0|unstable)/register$
    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$

Pagination requests can also be handled, but all requests with the same path
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
for the room are in flight:

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
```yaml
media_instance_running_background_jobs: "media-repository-1"
```

### `synapse.app.user_dir`

@@ -383,15 +408,65 @@ file. For example:

    worker_main_http_uri: http://127.0.0.1:8008

### `synapse.app.event_creator`
### Historical apps

Handles some event creation. It can handle REST endpoints matching:
*Note:* Historically there used to be more apps, however they have been
amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
are ones that do specific processing unrelated to requests, e.g. the `pusher`
that handles sending out push notifications for new events. The intention is for
all these to be folded into the `generic_worker` app and to use config to define
which processes handle the various processing such as push notifications.

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.
## Migration from old config

There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible and so no changes to the config are required, however
server admins are encouraged to plan to migrate to Redis as the old style direct
TCP replication config is deprecated.

To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.

To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where workers are started (e.g.
in systemd service files, but not required for synctl).
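
As a sketch of that second migration, a worker config that used to read (the
surrounding values are illustrative):

```yaml
worker_app: synapse.app.synchrotron
worker_name: worker1
```

would simply become:

```yaml
worker_app: synapse.app.generic_worker
worker_name: worker1
```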


## Architectural diagram

The following shows an example setup using Redis and a reverse proxy:

```
                     Clients & Federation
                              |
                              v
                        +-----------+
                        |           |
                        |  Reverse  |
                        |  Proxy    |
                        |           |
                        +-----------+
                            | | |
                            | | | HTTP requests
        +-------------------+ | +-----------+
        |                 +---+             |
        |                 |                 |
        v                 v                 v
+--------------+  +--------------+  +--------------+  +--------------+
|     Main     |  |   Generic    |  |   Generic    |  |    Event     |
|   Process    |  |   Worker 1   |  |   Worker 2   |  |  Persister   |
+--------------+  +--------------+  +--------------+  +--------------+
   ^    ^           |   ^   |          |   ^   |          ^    ^
   |    |           |   |   |          |   |   |          |    |
   |    |           |   |   |   HTTP   |   |   |          |    |
   |    +-----------+<--|---|----------+   |   |          |    |
   |                    |   +--------------|-->+----------+    |
   |                    |                  |                   |
   |                    |                  |                   |
   v                    v                  v                   v
====================================================================
                         Redis pub/sub channel
```

mypy.ini (6 lines)
@@ -78,3 +78,9 @@ ignore_missing_imports = True

[mypy-authlib.*]
ignore_missing_imports = True

[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True

[mypy-nacl.*]
ignore_missing_imports = True

@@ -24,7 +24,6 @@ DISTS = (
|
||||
"debian:sid",
|
||||
"ubuntu:xenial",
|
||||
"ubuntu:bionic",
|
||||
"ubuntu:eoan",
|
||||
"ubuntu:focal",
|
||||
)

@@ -3,37 +3,60 @@
# A script which checks that an appropriate news file has been added on this
# branch.

echo -e "+++ \033[32mChecking newsfragment\033[m"

set -e

# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch origin develop
git fetch -q origin develop

pr="$BUILDKITE_PULL_REQUEST"

# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet FETCH_HEAD... -- debian; then
    if git diff --quiet FETCH_HEAD... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
        echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
        exit 1
    fi
fi

# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
    tox -e check-newsfragment
if ! git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
    exit 0
fi

# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"

# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
tox -qe check-newsfragment || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)

echo
echo "--------------------------"
echo

# check that any new newsfiles on this branch end with a full stop.
matched=0
for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
    # check that any modified newsfiles on this branch end with a full stop.
    lastchar=`tr -d '\n' < $f | tail -c 1`
    if [ $lastchar != '.' -a $lastchar != '!' ]; then
        echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
        echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
        exit 1
    fi

    # see if this newsfile corresponds to the right PR
    [[ -n "$pr" && "$f" == changelog.d/"$pr".* ]] && matched=1
done

if [[ -n "$pr" && "$matched" -eq 0 ]]; then
    echo -e "\e[31mERROR: Did not find a news fragment with the right number: expected changelog.d/$pr.*.\e[39m" >&2
    echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
    exit 1
fi

scripts-dev/check_line_terminators.sh (executable file)
@@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script checks that line terminators in all repository files (excluding
# those in the .git directory) feature unix line terminators.
#
# Usage:
#
#     ./check_line_terminators.sh
#
# The script will emit exit code 1 if any files that do not use unix line
# terminators are found, 0 otherwise.

# cd to the root of the repository
cd `dirname $0`/..

# Find and print files with non-unix line terminators
if find . -path './.git/*' -prune -o -type f -print0 | xargs -0 grep -I -l $'\r$'; then
    echo -e '\e[31mERROR: found files with CRLF line endings. See above.\e[39m'
    exit 1
fi
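
For contributors on platforms without GNU find and xargs, a rough Python equivalent of the same check might look like this (a sketch, not part of the repository):

```
#!/usr/bin/env python3
"""Sketch: flag repository files containing CRLF line terminators.

Mirrors the shell script above: walk the tree, skip .git and
binary-looking files, and exit 1 if any file contains '\r\n'.
"""
import os
import sys

found = False
for root, dirs, files in os.walk("."):
    dirs[:] = [d for d in dirs if d != ".git"]  # prune .git, like `find -prune`
    for name in files:
        path = os.path.join(root, name)
        try:
            with open(path, "rb") as f:
                data = f.read()
        except OSError:
            continue
        if b"\0" in data:
            continue  # crude binary check, standing in for `grep -I`
        if b"\r\n" in data:
            print(path)
            found = True

if found:
    print("ERROR: found files with CRLF line endings. See above.", file=sys.stderr)
    sys.exit(1)
```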

@@ -2,9 +2,9 @@ import argparse
import json
import logging
import sys
import urllib2

import dns.resolver
import urllib2
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

@@ -21,8 +21,7 @@ import argparse
import base64
import json
import sys

from six.moves.urllib import parse as urlparse
from urllib import parse as urlparse

import nacl.signing
import requests

@@ -2,8 +2,8 @@
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements
# flake8 - lints and finds mistakes
# black - opinionated code formatter
# flake8 - lints and finds mistakes

set -e

@@ -11,11 +11,11 @@ if [ $# -ge 1 ]
then
    files=$*
else
    files="synapse tests scripts-dev scripts"
    files="synapse tests scripts-dev scripts contrib synctl"
fi

echo "Linting these locations: $files"
isort -y -rc $files
flake8 $files
isort $files
python3 -m black $files
./scripts-dev/config-lint.sh
flake8 $files

@@ -40,7 +40,7 @@ class MockHomeserver(HomeServer):
            config.server_name, reactor=reactor, config=config, **kwargs
        )

        self.version_string = "Synapse/"+get_version_string(synapse)
        self.version_string = "Synapse/" + get_version_string(synapse)


if __name__ == "__main__":
@@ -86,7 +86,7 @@ if __name__ == "__main__":
    store = hs.get_datastore()

    async def run_background_updates():
        await store.db.updates.run_background_updates(sleep=False)
        await store.db_pool.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()

@@ -23,8 +23,6 @@ import sys
import time
import traceback

from six import string_types

import yaml

from twisted.internet import defer, reactor
@@ -37,30 +35,29 @@ from synapse.logging.context import (
    make_deferred_yieldable,
    run_in_background,
)
from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.data_stores.main.deviceinbox import (
    DeviceInboxBackgroundUpdateStore,
)
from synapse.storage.data_stores.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.data_stores.main.events_bg_updates import (
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.events_bg_updates import (
    EventsBackgroundUpdatesStore,
)
from synapse.storage.data_stores.main.media_repository import (
from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.data_stores.main.registration import (
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
)
from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
from synapse.storage.data_stores.main.state import MainStateBackgroundUpdateStore
from synapse.storage.data_stores.main.stats import StatsStore
from synapse.storage.data_stores.main.user_directory import (
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.user_directory import (
    UserDirectoryBackgroundUpdateStore,
)
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database, make_conn
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.util import Clock

@@ -91,6 +88,7 @@ BOOLEAN_COLUMNS = {
    "account_validity": ["email_sent"],
    "redactions": ["have_censored"],
    "room_stats_state": ["is_federatable"],
    "local_media_repository": ["safe_from_quarantine"],
}


@@ -129,6 +127,26 @@ APPEND_ONLY_TABLES = [
]


IGNORED_TABLES = {
    # We don't port these tables, as they're a faff and we can regenerate
    # them anyway.
    "user_directory",
    "user_directory_search",
    "user_directory_search_content",
    "user_directory_search_docsize",
    "user_directory_search_segdir",
    "user_directory_search_segments",
    "user_directory_search_stat",
    "user_directory_search_pos",
    "users_who_share_private_rooms",
    "users_in_public_room",
    # UI auth sessions have foreign keys so additional care needs to be taken,
    # the sessions are transient anyway, so ignore them.
    "ui_auth_sessions",
    "ui_auth_sessions_credentials",
}


# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error = None

@@ -155,14 +173,14 @@ class Store(
    StatsStore,
):
    def execute(self, f, *args, **kwargs):
        return self.db.runInteraction(f.__name__, f, *args, **kwargs)
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

    def execute_sql(self, sql, *args):
        def r(txn):
            txn.execute(sql, args)
            return txn.fetchall()

        return self.db.runInteraction("execute_sql", r)
        return self.db_pool.runInteraction("execute_sql", r)

    def insert_many_txn(self, txn, table, headers, rows):
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
@@ -207,7 +225,7 @@ class Porter(object):
    async def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            row = await self.postgres_store.db.simple_select_one(
            row = await self.postgres_store.db_pool.simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcols=("forward_rowid", "backward_rowid"),
@@ -224,7 +242,7 @@ class Porter(object):
                ) = await self._setup_sent_transactions()
                backward_chunk = 0
            else:
                await self.postgres_store.db.simple_insert(
                await self.postgres_store.db_pool.simple_insert(
                    table="port_from_sqlite3",
                    values={
                        "table_name": table,
@@ -254,7 +272,7 @@ class Porter(object):

        await self.postgres_store.execute(delete_all)

        await self.postgres_store.db.simple_insert(
        await self.postgres_store.db_pool.simple_insert(
            table="port_from_sqlite3",
            values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
        )
@@ -291,21 +309,14 @@ class Porter(object):
            )
            return

        if table in (
            "user_directory",
            "user_directory_search",
            "users_who_share_rooms",
            "users_in_pubic_room",
        ):
            # We don't port these tables, as they're a faff and we can regenreate
            # them anyway.
        if table in IGNORED_TABLES:
            self.progress.update(table, table_size)  # Mark table as done
            return

        if table == "user_directory_stream_pos":
            # We need to make sure there is a single row, `(X, null), as that is
            # what synapse expects to be there.
            await self.postgres_store.db.simple_insert(
            await self.postgres_store.db_pool.simple_insert(
                table=table, values={"stream_id": None}
            )
            self.progress.update(table, table_size)  # Mark table as done
@@ -346,7 +357,7 @@ class Porter(object):

            return headers, forward_rows, backward_rows

        headers, frows, brows = await self.sqlite_store.db.runInteraction(
        headers, frows, brows = await self.sqlite_store.db_pool.runInteraction(
            "select", r
        )

@@ -362,7 +373,7 @@ class Porter(object):
        def insert(txn):
            self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)

            self.postgres_store.db.simple_update_one_txn(
            self.postgres_store.db_pool.simple_update_one_txn(
                txn,
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
@@ -400,7 +411,7 @@ class Porter(object):

            return headers, rows

        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r)

        if rows:
            forward_chunk = rows[-1][0] + 1
@@ -438,7 +449,7 @@ class Porter(object):
            ],
        )

        self.postgres_store.db.simple_update_one_txn(
        self.postgres_store.db_pool.simple_update_one_txn(
            txn,
            table="port_from_sqlite3",
            keyvalues={"table_name": "event_search"},
@@ -481,7 +492,7 @@ class Porter(object):
            db_conn, allow_outdated_version=allow_outdated_version
        )
        prepare_database(db_conn, engine, config=self.hs_config)
        store = Store(Database(hs, db_config, engine), db_conn, hs)
        store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)
        db_conn.commit()

        return store

@@ -489,7 +500,7 @@ class Porter(object):
    async def run_background_updates_on_postgres(self):
        # Manually apply all background updates on the PostgreSQL database.
        postgres_ready = (
            await self.postgres_store.db.updates.has_completed_background_updates()
            await self.postgres_store.db_pool.updates.has_completed_background_updates()
        )

        if not postgres_ready:
@@ -498,9 +509,9 @@ class Porter(object):
            self.progress.set_state("Running background updates on PostgreSQL")

        while not postgres_ready:
            await self.postgres_store.db.updates.do_next_background_update(100)
            await self.postgres_store.db_pool.updates.do_next_background_update(100)
            postgres_ready = await (
                self.postgres_store.db.updates.has_completed_background_updates()
                self.postgres_store.db_pool.updates.has_completed_background_updates()
            )

    async def run(self):
@@ -521,7 +532,7 @@ class Porter(object):

            # Check if all background updates are done, abort if not.
            updates_complete = (
                await self.sqlite_store.db.updates.has_completed_background_updates()
                await self.sqlite_store.db_pool.updates.has_completed_background_updates()
            )
            if not updates_complete:
                end_error = (
@@ -563,22 +574,24 @@ class Porter(object):
            )

            try:
                await self.postgres_store.db.runInteraction("alter_table", alter_table)
                await self.postgres_store.db_pool.runInteraction(
                    "alter_table", alter_table
                )
            except Exception:
                # On Error Resume Next
                pass

            await self.postgres_store.db.runInteraction(
            await self.postgres_store.db_pool.runInteraction(
                "create_port_table", create_port_table
            )

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = await self.sqlite_store.db.simple_select_onecol(
            sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol(
                table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
            )

            postgres_tables = await self.postgres_store.db.simple_select_onecol(
            postgres_tables = await self.postgres_store.db_pool.simple_select_onecol(
                table="information_schema.tables",
                keyvalues={},
                retcol="distinct table_name",
@@ -610,8 +623,10 @@ class Porter(object):
                )
            )

            # Step 5. Do final post-processing
            # Step 5. Set up sequences
            self.progress.set_state("Setting up sequence generators")
            await self._setup_state_group_id_seq()
            await self._setup_user_id_seq()

            self.progress.done()
        except Exception as e:
@@ -635,7 +650,7 @@ class Porter(object):
            return bool(col)
        if isinstance(col, bytes):
            return bytearray(col)
        elif isinstance(col, string_types) and "\0" in col:
        elif isinstance(col, str) and "\0" in col:
            logger.warning(
                "DROPPING ROW: NUL value in table %s col %s: %r",
                table,
@@ -677,7 +692,7 @@ class Porter(object):

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r)

        rows = self._convert_rows("sent_transactions", headers, rows)

@@ -710,7 +725,7 @@ class Porter(object):
        next_chunk = await self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        await self.postgres_store.db.simple_insert(
        await self.postgres_store.db_pool.simple_insert(
            table="port_from_sqlite3",
            values={
                "table_name": "sent_transactions",
@@ -779,7 +794,14 @@ class Porter(object):
            next_id = curr_id + 1
            txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))

        return self.postgres_store.db.runInteraction("setup_state_group_id_seq", r)
        return self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r)

    def _setup_user_id_seq(self):
        def r(txn):
            next_id = find_max_generated_user_id_localpart(txn) + 1
            txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))

        return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)


##############################################

@@ -26,12 +26,11 @@ ignore=W503,W504,E203,E731,E501

[isort]
line_length = 88
not_skip = __init__.py
sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
default_section=THIRDPARTY
known_first_party = synapse
known_tests=tests
known_compat = mock,six
known_compat = mock
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
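
For illustration, under this custom section order a hypothetical module's imports would group roughly as follows (the listed packages are assumed to be installed; with six dropped, the COMPAT section now holds only mock):

```
# FUTURE, then STDLIB
from __future__ import annotations

import logging
from typing import Optional

# COMPAT (now just `mock`)
from mock import Mock

# THIRDPARTY
import attr

# TWISTED
from twisted.internet import defer

# FIRSTPARTY
from synapse.api.errors import SynapseError

# TESTS
from tests import unittest
```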

setup.py
@@ -114,6 +114,7 @@ setup(
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={"test": TestCommand},

@@ -22,6 +22,7 @@ class RedisProtocol:
    def publish(self, channel: str, message: bytes): ...

class SubscriberProtocol:
    def __init__(self, *args, **kwargs): ...
    password: Optional[str]
    def subscribe(self, channels: Union[str, List[str]]): ...
    def connectionMade(self): ...

@@ -17,6 +17,7 @@
""" This is a reference implementation of a Matrix homeserver.
"""

import json
import os
import sys

@@ -25,6 +26,9 @@ if sys.version_info < (3, 5):
    print("Synapse requires Python 3.5 or above.")
    sys.exit(1)

# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
try:
    from twisted.internet import protocol
    from twisted.internet.protocol import Factory
@@ -36,7 +40,15 @@ try:
except ImportError:
    pass

__version__ = "1.14.0"
# Use the standard library json implementation instead of simplejson.
try:
    from canonicaljson import set_json_library

    set_json_library(json)
except ImportError:
    pass

__version__ = "1.19.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -23,8 +23,6 @@ import hmac
import logging
import sys

from six.moves import input

import requests as _requests
import yaml

@@ -12,19 +12,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Optional

from six import itervalues
from typing import List, Optional, Tuple

import pymacaroons
from netaddr import IPAddress

from twisted.internet import defer
from twisted.web.server import Request

import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
from synapse.api.auth_blocking import AuthBlocking
@@ -37,6 +32,7 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.logging import opentracing as opentracing
from synapse.types import StateMap, UserID
from synapse.util.caches import register_cache
from synapse.util.caches.lrucache import LruCache
@@ -83,28 +79,28 @@ class Auth(object):
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key

    @defer.inlineCallbacks
    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids()
        auth_events_ids = yield self.compute_auth_events(
    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True
    ):
        prev_state_ids = await context.get_prev_state_ids()
        auth_events_ids = self.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)}
        auth_events = await self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events.values()}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        event_auth.check(
            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
        )

    @defer.inlineCallbacks
    def check_user_in_room(
    async def check_user_in_room(
        self,
        room_id: str,
        user_id: str,
        current_state: Optional[StateMap[EventBase]] = None,
        allow_departed_users: bool = False,
    ):
    ) -> EventBase:
        """Check if the user is in the room, or was at some point.
        Args:
            room_id: The room to check.
@@ -122,35 +118,35 @@ class Auth(object):
        Raises:
            AuthError if the user is/was not in the room.
        Returns:
            Deferred[Optional[EventBase]]:
                Membership event for the user if the user was in the
                room. This will be the join event if they are currently joined to
                the room. This will be the leave event if they have left the room.
            Membership event for the user if the user was in the
            room. This will be the join event if they are currently joined to
            the room. This will be the leave event if they have left the room.
        """
        if current_state:
            member = current_state.get((EventTypes.Member, user_id), None)
        else:
            member = yield self.state.get_current_state(
            member = await self.state.get_current_state(
                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
            )
        membership = member.membership if member else None

        if membership == Membership.JOIN:
            return member
        if member:
            membership = member.membership

        # XXX this looks totally bogus. Why do we not allow users who have been banned,
        # or those who were members previously and have been re-invited?
        if allow_departed_users and membership == Membership.LEAVE:
            forgot = yield self.store.did_forget(user_id, room_id)
            if not forgot:
            if membership == Membership.JOIN:
                return member

            # XXX this looks totally bogus. Why do we not allow users who have been banned,
            # or those who were members previously and have been re-invited?
            if allow_departed_users and membership == Membership.LEAVE:
                forgot = await self.store.did_forget(user_id, room_id)
                if not forgot:
                    return member

        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
    async def check_host_in_room(self, room_id, host):
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = yield self.store.is_host_joined(room_id, host)
            latest_event_ids = await self.store.is_host_joined(room_id, host)
            return latest_event_ids

    def can_federate(self, event, auth_events):
@@ -161,14 +157,13 @@ class Auth(object):
    def get_public_keys(self, invite_event):
        return event_auth.get_public_keys(invite_event)

    @defer.inlineCallbacks
    def get_user_by_req(
    async def get_user_by_req(
        self,
        request: Request,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
    ):
    ) -> synapse.types.Requester:
        """ Get a registered user's ID.

        Args:
@@ -181,7 +176,7 @@ class Auth(object):
            /login will deliver access tokens regardless of expiration.

        Returns:
            defer.Deferred: resolves to a `synapse.types.Requester` object
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
            is invalid.
@@ -195,14 +190,14 @@ class Auth(object):

            access_token = self.get_access_token_from_request(request)

            user_id, app_service = yield self._get_appservice_user_id(request)
            user_id, app_service = await self._get_appservice_user_id(request)
            if user_id:
                request.authenticated_entity = user_id
                opentracing.set_tag("authenticated_entity", user_id)
                opentracing.set_tag("appservice_id", app_service.id)

                if ip_addr and self._track_appservice_user_ips:
                    yield self.store.insert_client_ip(
                    await self.store.insert_client_ip(
                        user_id=user_id,
                        access_token=access_token,
                        ip=ip_addr,
@@ -212,17 +207,18 @@ class Auth(object):

                return synapse.types.create_requester(user_id, app_service=app_service)

            user_info = yield self.get_user_by_access_token(
            user_info = await self.get_user_by_access_token(
                access_token, rights, allow_expired=allow_expired
            )
            user = user_info["user"]
            token_id = user_info["token_id"]
            is_guest = user_info["is_guest"]
            shadow_banned = user_info["shadow_banned"]

            # Deny the request if the user account has expired.
            if self._account_validity.enabled and not allow_expired:
                user_id = user.to_string()
                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
                expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
                if (
                    expiration_ts is not None
                    and self.clock.time_msec() >= expiration_ts
@@ -236,7 +232,7 @@ class Auth(object):
            device_id = user_info.get("device_id")

            if user and access_token and ip_addr:
                yield self.store.insert_client_ip(
                await self.store.insert_client_ip(
                    user_id=user.to_string(),
                    access_token=access_token,
                    ip=ip_addr,
@@ -257,13 +253,17 @@ class Auth(object):
                opentracing.set_tag("device_id", device_id)

            return synapse.types.create_requester(
                user, token_id, is_guest, device_id, app_service=app_service
                user,
                token_id,
                is_guest,
                shadow_banned,
                device_id,
                app_service=app_service,
            )
        except KeyError:
            raise MissingClientTokenError()

    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
    async def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(request)
        )
@@ -284,14 +284,13 @@ class Auth(object):

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(403, "Application service cannot masquerade as this user.")
        if not (yield self.store.get_user_by_id(user_id)):
        if not (await self.store.get_user_by_id(user_id)):
            raise AuthError(403, "Application service has not registered this user")
        return user_id, app_service

    @defer.inlineCallbacks
    def get_user_by_access_token(
    async def get_user_by_access_token(
        self, token: str, rights: str = "access", allow_expired: bool = False,
    ):
    ) -> dict:
        """ Validate access token and get user_id from it

        Args:
@@ -301,9 +300,10 @@ class Auth(object):
            allow_expired: If False, raises an InvalidClientTokenError
                if the token is expired
        Returns:
            Deferred[dict]: dict that includes:
            dict that includes:
                `user` (UserID)
                `is_guest` (bool)
                `shadow_banned` (bool)
                `token_id` (int|None): access token id. May be None if guest
                `device_id` (str|None): device corresponding to access token
        Raises:
@@ -315,7 +315,7 @@ class Auth(object):

        if rights == "access":
            # first look in the database
            r = yield self._look_up_user_by_access_token(token)
            r = await self._look_up_user_by_access_token(token)
            if r:
                valid_until_ms = r["valid_until_ms"]
                if (
@@ -353,7 +353,7 @@ class Auth(object):
                # It would of course be much easier to store guest access
                # tokens in the database as well, but that would break existing
                # guest tokens.
                stored_user = yield self.store.get_user_by_id(user_id)
                stored_user = await self.store.get_user_by_id(user_id)
                if not stored_user:
                    raise InvalidClientTokenError("Unknown user_id %s" % user_id)
                if not stored_user["is_guest"]:
@@ -363,6 +363,7 @@ class Auth(object):
                ret = {
                    "user": user,
                    "is_guest": True,
                    "shadow_banned": False,
                    "token_id": None,
                    # all guests get the same device id
                    "device_id": GUEST_DEVICE_ID,
@@ -372,6 +373,7 @@ class Auth(object):
                ret = {
                    "user": user,
                    "is_guest": False,
                    "shadow_banned": False,
                    "token_id": None,
                    "device_id": None,
                }
@@ -483,9 +485,8 @@ class Auth(object):
        now = self.hs.get_clock().time_msec()
        return now < expiry

    @defer.inlineCallbacks
    def _look_up_user_by_access_token(self, token):
        ret = yield self.store.get_user_by_access_token(token)
    async def _look_up_user_by_access_token(self, token):
        ret = await self.store.get_user_by_access_token(token)
        if not ret:
            return None

@@ -496,6 +497,7 @@ class Auth(object):
            "user": UserID.from_string(ret.get("name")),
            "token_id": ret.get("token_id", None),
            "is_guest": False,
            "shadow_banned": ret.get("shadow_banned"),
            "device_id": ret.get("device_id"),
            "valid_until_ms": ret.get("valid_until_ms"),
        }
@@ -508,22 +510,22 @@ class Auth(object):
            logger.warning("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
        request.authenticated_entity = service.sender
        return defer.succeed(service)
        return service

    def is_server_admin(self, user):
    async def is_server_admin(self, user: UserID) -> bool:
        """ Check if the given user is a local server admin.

        Args:
            user (UserID): user to check
            user: user to check

        Returns:
            bool: True if the user is an admin
            True if the user is an admin
        """
        return self.store.is_server_admin(user)
        return await self.store.is_server_admin(user)

    def compute_auth_events(
        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
    ):
    ) -> List[str]:
        """Given an event and current state return the list of event IDs used
        to auth an event.

@@ -531,16 +533,16 @@ class Auth(object):
            should be added to the event's `auth_events`.

        Returns:
            defer.Deferred(list[str]): List of event IDs.
            List of event IDs.
        """

        if event.type == EventTypes.Create:
            return defer.succeed([])
            return []

        # Currently we ignore the `for_verification` flag even though there are
        # some situations where we can drop particular auth events when adding
        # to the event's `auth_events` (e.g. joins pointing to previous joins
        # when room is publically joinable). Dropping event IDs has the
        # when room is publicly joinable). Dropping event IDs has the
        # advantage that the auth chain for the room grows slower, but we use
        # the auth chain in state resolution v2 to order events, which means
        # care must be taken if dropping events to ensure that it doesn't
@@ -554,7 +556,7 @@ class Auth(object):
        if auth_ev_id:
            auth_ids.append(auth_ev_id)

        return defer.succeed(auth_ids)
        return auth_ids

    async def check_can_change_room_list(self, room_id: str, user: UserID):
        """Determine whether the user is allowed to edit the room's entry in the
@@ -637,10 +639,9 @@ class Auth(object):

        return query_params[0].decode("ascii")

    @defer.inlineCallbacks
    def check_user_in_room_or_world_readable(
    async def check_user_in_room_or_world_readable(
        self, room_id: str, user_id: str, allow_departed_users: bool = False
    ):
    ) -> Tuple[str, Optional[str]]:
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

@@ -651,10 +652,9 @@ class Auth(object):
            members but have now departed

        Returns:
            Deferred[tuple[str, str|None]]: Resolves to the current membership of
                the user in the room and the membership event ID of the user. If
                the user is not in the room and never has been, then
                `(Membership.JOIN, None)` is returned.
            Resolves to the current membership of the user in the room and the
            membership event ID of the user. If the user is not in the room and
            never has been, then `(Membership.JOIN, None)` is returned.
        """

        try:
@@ -663,12 +663,12 @@ class Auth(object):
            # * The user is a non-guest user, and was ever in the room
            # * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_in_room(
            member_event = await self.check_user_in_room(
                room_id, user_id, allow_departed_users=allow_departed_users
            )
            return member_event.membership, member_event.event_id
        except AuthError:
            visibility = yield self.state.get_current_state(
            visibility = await self.state.get_current_state(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (

@@ -15,8 +15,6 @@

import logging

from twisted.internet import defer

from synapse.api.constants import LimitBlockingTypes, UserTypes
from synapse.api.errors import Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
@@ -36,8 +34,7 @@ class AuthBlocking(object):
        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
        self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
    async def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

@@ -60,7 +57,7 @@ class AuthBlocking(object):
        if user_id is not None:
            if user_id == self._server_notices_mxid:
                return
            if (yield self.store.is_support_user(user_id)):
            if await self.store.is_support_user(user_id):
                return

        if self._hs_disabled:
@@ -76,11 +73,11 @@ class AuthBlocking(object):

        # If the user is already part of the MAU cohort or a trial user
        if user_id:
            timestamp = yield self.store.user_last_seen_monthly_active(user_id)
            timestamp = await self.store.user_last_seen_monthly_active(user_id)
            if timestamp:
                return

            is_trial = yield self.store.is_trial_user(user_id)
            is_trial = await self.store.is_trial_user(user_id)
            if is_trial:
                return
        elif threepid:
@@ -93,7 +90,7 @@ class AuthBlocking(object):
            # allow registration. Support users are excluded from MAU checks.
            return
        # Else if there is no room in the MAU bucket, bail
        current_mau = yield self.store.get_monthly_active_count()
        current_mau = await self.store.get_monthly_active_count()
        if current_mau >= self._max_mau_value:
            raise ResourceLimitError(
                403,

@@ -61,13 +61,9 @@ class LoginType(object):
    MSISDN = "m.login.msisdn"
    RECAPTCHA = "m.login.recaptcha"
    TERMS = "m.login.terms"
    SSO = "org.matrix.login.sso"
    SSO = "m.login.sso"
    DUMMY = "m.login.dummy"

    # Only for C/S API v1
    APPLICATION_SERVICE = "m.login.application_service"
    SHARED_SECRET = "org.matrix.login.shared_secret"


class EventTypes(object):
    Member = "m.room.member"
@@ -154,3 +150,8 @@ class EventContentFields(object):
    # Timestamp to delete the event after
    # cf https://github.com/matrix-org/matrix-doc/pull/2228
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"


class RoomEncryptionAlgorithms(object):
    MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"
    DEFAULT = MEGOLM_V1_AES_SHA2

@@ -17,15 +17,17 @@
"""Contains exceptions and error codes."""

import logging
from typing import Dict, List

from six import iteritems
from six.moves import http_client
import typing
from http import HTTPStatus
from typing import Dict, List, Optional, Union

from canonicaljson import json

from twisted.web import http

if typing.TYPE_CHECKING:
    from synapse.types import JsonDict

logger = logging.getLogger(__name__)

@@ -80,11 +82,11 @@ class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes.

    Attributes:
        code (int): HTTP error code
        msg (str): string describing the error
        code: HTTP error code
        msg: string describing the error
    """

    def __init__(self, code, msg):
    def __init__(self, code: Union[int, HTTPStatus], msg: str):
        super(CodeMessageException, self).__init__("%d: %s" % (code, msg))

        # Some calls to this method pass instances of http.HTTPStatus for `code`.
@@ -125,16 +127,16 @@ class SynapseError(CodeMessageException):
    message (as well as an HTTP status code).

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
        errcode: Matrix error code e.g 'M_FORBIDDEN'
    """

    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
    def __init__(self, code: int, msg: str, errcode: str = Codes.UNKNOWN):
        """Constructs a synapse error.

        Args:
            code (int): The integer error code (an HTTP response code)
            msg (str): The human-readable error message.
            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
            code: The integer error code (an HTTP response code)
            msg: The human-readable error message.
            errcode: The matrix error code e.g 'M_FORBIDDEN'
        """
        super(SynapseError, self).__init__(code, msg)
        self.errcode = errcode
@@ -147,10 +149,16 @@ class ProxiedRequestError(SynapseError):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
        errcode: Matrix error code e.g 'M_FORBIDDEN'
    """

    def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
    def __init__(
        self,
        code: int,
        msg: str,
        errcode: str = Codes.UNKNOWN,
        additional_fields: Optional[Dict] = None,
    ):
        super(ProxiedRequestError, self).__init__(code, msg, errcode)
        if additional_fields is None:
            self._additional_fields = {}  # type: Dict
@@ -166,15 +174,15 @@ class ConsentNotGivenError(SynapseError):
    privacy policy.
    """

    def __init__(self, msg, consent_uri):
    def __init__(self, msg: str, consent_uri: str):
        """Constructs a ConsentNotGivenError

        Args:
            msg (str): The human-readable error message
            consent_url (str): The URL where the user can give their consent
            msg: The human-readable error message
            consent_url: The URL where the user can give their consent
        """
        super(ConsentNotGivenError, self).__init__(
            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.CONSENT_NOT_GIVEN
            code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.CONSENT_NOT_GIVEN
        )
        self._consent_uri = consent_uri

@@ -187,14 +195,14 @@ class UserDeactivatedError(SynapseError):
    authenticated endpoint, but the account has been deactivated.
    """

    def __init__(self, msg):
    def __init__(self, msg: str):
        """Constructs a UserDeactivatedError

        Args:
            msg (str): The human-readable error message
            msg: The human-readable error message
        """
        super(UserDeactivatedError, self).__init__(
            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED
            code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED
        )

@@ -203,16 +211,16 @@ class FederationDeniedError(SynapseError):
    is not on its federation whitelist.

    Attributes:
        destination (str): The destination which has been denied
        destination: The destination which has been denied
    """

    def __init__(self, destination):
    def __init__(self, destination: Optional[str]):
        """Raised by federation client or server to indicate that we are
        are deliberately not attempting to contact a given server because it is
        not on our federation whitelist.

        Args:
            destination (str): the domain in question
            destination: the domain in question
        """

        self.destination = destination
@@ -230,14 +238,16 @@ class InteractiveAuthIncompleteError(Exception):
    (This indicates we should return a 401 with 'result' as the body)

    Attributes:
        result (dict): the server response to the request, which should be
        session_id: The ID of the ongoing interactive auth session.
        result: the server response to the request, which should be
            passed back to the client
    """

    def __init__(self, result):
    def __init__(self, session_id: str, result: "JsonDict"):
        super(InteractiveAuthIncompleteError, self).__init__(
            "Interactive auth not yet complete"
        )
        self.session_id = session_id
        self.result = result

@@ -247,7 +257,6 @@ class UnrecognizedRequestError(SynapseError):
    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.UNRECOGNIZED
        message = None
        if len(args) == 0:
            message = "Unrecognized request"
        else:
@@ -258,7 +267,7 @@ class UnrecognizedRequestError(SynapseError):
class NotFoundError(SynapseError):
    """An error indicating we can't find the thing you asked for"""

    def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
    def __init__(self, msg: str = "Not found", errcode: str = Codes.NOT_FOUND):
        super(NotFoundError, self).__init__(404, msg, errcode=errcode)


@@ -284,21 +293,23 @@ class InvalidClientCredentialsError(SynapseError):
    M_UNKNOWN_TOKEN respectively.
    """

    def __init__(self, msg, errcode):
    def __init__(self, msg: str, errcode: str):
        super().__init__(code=401, msg=msg, errcode=errcode)


class MissingClientTokenError(InvalidClientCredentialsError):
    """Raised when we couldn't find the access token in a request"""

    def __init__(self, msg="Missing access token"):
    def __init__(self, msg: str = "Missing access token"):
        super().__init__(msg=msg, errcode="M_MISSING_TOKEN")


class InvalidClientTokenError(InvalidClientCredentialsError):
    """Raised when we didn't understand the access token in a request"""

    def __init__(self, msg="Unrecognised access token", soft_logout=False):
    def __init__(
        self, msg: str = "Unrecognised access token", soft_logout: bool = False
    ):
        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
        self._soft_logout = soft_logout

@@ -316,11 +327,11 @@ class ResourceLimitError(SynapseError):

    def __init__(
        self,
        code,
        msg,
        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
        admin_contact=None,
        limit_type=None,
        code: int,
        msg: str,
        errcode: str = Codes.RESOURCE_LIMIT_EXCEEDED,
        admin_contact: Optional[str] = None,
        limit_type: Optional[str] = None,
    ):
        self.admin_contact = admin_contact
        self.limit_type = limit_type
@@ -368,10 +379,10 @@ class StoreError(SynapseError):
class InvalidCaptchaError(SynapseError):
    def __init__(
        self,
        code=400,
        msg="Invalid captcha.",
        error_url=None,
        errcode=Codes.CAPTCHA_INVALID,
        code: int = 400,
        msg: str = "Invalid captcha.",
        error_url: Optional[str] = None,
        errcode: str = Codes.CAPTCHA_INVALID,
    ):
        super(InvalidCaptchaError, self).__init__(code, msg, errcode)
        self.error_url = error_url
@@ -386,10 +397,10 @@ class LimitExceededError(SynapseError):

    def __init__(
        self,
        code=429,
        msg="Too Many Requests",
        retry_after_ms=None,
        errcode=Codes.LIMIT_EXCEEDED,
        code: int = 429,
        msg: str = "Too Many Requests",
        retry_after_ms: Optional[int] = None,
        errcode: str = Codes.LIMIT_EXCEEDED,
    ):
        super(LimitExceededError, self).__init__(code, msg, errcode)
        self.retry_after_ms = retry_after_ms
@@ -402,10 +413,10 @@ class RoomKeysVersionError(SynapseError):
    """A client has tried to upload to a non-current version of the room_keys store
    """

    def __init__(self, current_version):
    def __init__(self, current_version: str):
        """
        Args:
            current_version (str): the current version of the store they should have used
            current_version: the current version of the store they should have used
        """
        super(RoomKeysVersionError, self).__init__(
            403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION
@@ -417,7 +428,7 @@ class UnsupportedRoomVersionError(SynapseError):
    """The client's request to create a room used a room version that the server does
    not support."""

    def __init__(self, msg="Homeserver does not support this room version"):
    def __init__(self, msg: str = "Homeserver does not support this room version"):
        super(UnsupportedRoomVersionError, self).__init__(
            code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION,
        )
@@ -439,7 +450,7 @@ class IncompatibleRoomVersionError(SynapseError):
    failing.
    """

    def __init__(self, room_version):
    def __init__(self, room_version: str):
        super(IncompatibleRoomVersionError, self).__init__(
            code=400,
            msg="Your homeserver does not support the features required to "
@@ -459,8 +470,8 @@ class PasswordRefusedError(SynapseError):

    def __init__(
        self,
        msg="This password doesn't comply with the server's policy",
        errcode=Codes.WEAK_PASSWORD,
        msg: str = "This password doesn't comply with the server's policy",
        errcode: str = Codes.WEAK_PASSWORD,
    ):
        super(PasswordRefusedError, self).__init__(
            code=400, msg=msg, errcode=errcode,
@@ -485,19 +496,19 @@ class RequestSendFailed(RuntimeError):
        self.can_retry = can_retry


def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):
    """ Utility method for constructing an error response for client-server
    interactions.

    Args:
        msg (str): The error message.
        code (str): The error code.
        kwargs : Additional keys to add to the response.
        msg: The error message.
        code: The error code.
        kwargs: Additional keys to add to the response.
    Returns:
        A dict representing the error response JSON.
    """
    err = {"error": msg, "errcode": code}
    for key, value in iteritems(kwargs):
    for key, value in kwargs.items():
        err[key] = value
    return err

@@ -514,7 +525,14 @@ class FederationError(RuntimeError):
    is wrong (e.g., it referred to an invalid event)
    """

    def __init__(self, level, code, reason, affected, source=None):
    def __init__(
        self,
        level: str,
        code: int,
        reason: str,
        affected: str,
        source: Optional[str] = None,
    ):
        if level not in ["FATAL", "ERROR", "WARN"]:
            raise ValueError("Level is not valid: %s" % (level,))
        self.level = level
@@ -541,16 +559,16 @@ class HttpResponseException(CodeMessageException):
    Represents an HTTP-level failure of an outbound request

    Attributes:
        response (bytes): body of response
        response: body of response
    """

    def __init__(self, code, msg, response):
    def __init__(self, code: int, msg: str, response: bytes):
        """

        Args:
            code (int): HTTP status code
            msg (str): reason phrase from HTTP response status line
            response (bytes): body of response
            code: HTTP status code
            msg: reason phrase from HTTP response status line
            response: body of response
        """
        super(HttpResponseException, self).__init__(code, msg)
        self.response = response
@@ -575,7 +593,7 @@ class HttpResponseException(CodeMessageException):
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(self.response)
            j = json.loads(self.response.decode("utf-8"))
        except ValueError:
            j = {}

@@ -17,17 +17,13 @@
# limitations under the License.
from typing import List

from six import text_type

import jsonschema
from canonicaljson import json
from jsonschema import FormatChecker

from twisted.internet import defer

from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.api.presence import UserPresenceState
from synapse.types import RoomID, UserID

FILTER_SCHEMA = {
@@ -139,9 +135,8 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = yield self.store.get_user_filter(user_localpart, filter_id)
    async def get_user_filter(self, user_localpart, filter_id):
        result = await self.store.get_user_filter(user_localpart, filter_id)
        return FilterCollection(result)

    def add_user_filter(self, user_localpart, user_filter):
@@ -313,7 +308,7 @@ class Filter(object):

        content = event.get("content", {})
        # check if there is a string url field in the content for filtering purposes
        contains_url = isinstance(content.get("url"), text_type)
        contains_url = isinstance(content.get("url"), str)
        labels = content.get(EventContentFields.LABELS, [])

        return self.check_fields(room_id, sender, ev_type, labels, contains_url)

@@ -1,4 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,75 +17,157 @@ from collections import OrderedDict
from typing import Any, Optional, Tuple

from synapse.api.errors import LimitExceededError
from synapse.util import Clock


class Ratelimiter(object):
"""
Ratelimit message sending by user.
Ratelimit actions marked by arbitrary keys.

Args:
clock: A homeserver clock, for retrieving the current time
rate_hz: The long term number of actions that can be performed in a second.
burst_count: How many actions can be performed before being limited.
"""

def __init__(self):
self.message_counts = (
OrderedDict()
) # type: OrderedDict[Any, Tuple[float, int, Optional[float]]]
def __init__(self, clock: Clock, rate_hz: float, burst_count: int):
self.clock = clock
self.rate_hz = rate_hz
self.burst_count = burst_count

def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
# An ordered dictionary keeping track of actions, when they were last
# performed and how often. Each entry is a mapping from a key of arbitrary type
# to a tuple representing:
# * How many times an action has occurred since a point in time
# * The point in time
# * The rate_hz of this particular entry. This can vary per request
self.actions = OrderedDict() # type: OrderedDict[Any, Tuple[float, int, float]]
def can_do_action(
self,
key: Any,
rate_hz: Optional[float] = None,
burst_count: Optional[int] = None,
update: bool = True,
_time_now_s: Optional[int] = None,
) -> Tuple[bool, float]:
"""Can the entity (e.g. user or IP address) perform the action?

Args:
key: The key we should use when rate limiting. Can be a user ID
(when sending events), an IP address, etc.
time_now_s: The time now.
rate_hz: The long term number of messages a user can send in a
second.
burst_count: How many messages the user can send before being
limited.
update (bool): Whether to update the message rates or not. This is
useful to check if a message would be allowed to be sent before
it's ready to actually be sent.
rate_hz: The long term number of actions that can be performed in a second.
Overrides the value set during instantiation if set.
burst_count: How many actions can be performed before being limited.
Overrides the value set during instantiation if set.
update: Whether to count this check as performing the action
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.

Returns:
A pair of a bool indicating if they can send a message now and a
time in seconds of when they can next send a message.
A tuple containing:
* A bool indicating if they can perform the action now
* The reactor timestamp for when the action can be performed next.
-1 if rate_hz is less than or equal to zero
"""
self.prune_message_counts(time_now_s)
message_count, time_start, _ignored = self.message_counts.get(
key, (0.0, time_now_s, None)
)
# Override default values if set
time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
rate_hz = rate_hz if rate_hz is not None else self.rate_hz
burst_count = burst_count if burst_count is not None else self.burst_count

# Remove any expired entries
self._prune_message_counts(time_now_s)

# Check if there is an existing count entry for this key
action_count, time_start, _ = self.actions.get(key, (0.0, time_now_s, 0.0))

# Check whether performing another action is allowed
time_delta = time_now_s - time_start
sent_count = message_count - time_delta * rate_hz
if sent_count < 0:
performed_count = action_count - time_delta * rate_hz
if performed_count < 0:
# Allow, reset back to count 1
allowed = True
time_start = time_now_s
message_count = 1.0
elif sent_count > burst_count - 1.0:
action_count = 1.0
elif performed_count > burst_count - 1.0:
# Deny, we have exceeded our burst count
allowed = False
else:
# We haven't reached our limit yet
allowed = True
message_count += 1
action_count += 1.0

if update:
self.message_counts[key] = (message_count, time_start, rate_hz)
self.actions[key] = (action_count, time_start, rate_hz)

if rate_hz > 0:
time_allowed = time_start + (message_count - burst_count + 1) / rate_hz
# Find out when the count of existing actions expires
time_allowed = time_start + (action_count - burst_count + 1) / rate_hz

# Don't give back a time in the past
if time_allowed < time_now_s:
time_allowed = time_now_s

else:
# XXX: Why is this -1? This seems to only be used in
# self.ratelimit. I guess so that clients get a time in the past and don't
# feel afraid to try again immediately
time_allowed = -1

return allowed, time_allowed
def prune_message_counts(self, time_now_s):
for key in list(self.message_counts.keys()):
message_count, time_start, rate_hz = self.message_counts[key]
time_delta = time_now_s - time_start
if message_count - time_delta * rate_hz > 0:
break
else:
del self.message_counts[key]
def _prune_message_counts(self, time_now_s: int):
"""Remove message count entries that have not exceeded their defined
rate_hz limit

Args:
time_now_s: The current time
"""
# We create a copy of the key list here as the dictionary is modified during
# the loop
for key in list(self.actions.keys()):
action_count, time_start, rate_hz = self.actions[key]

# Rate limit = "seconds since we started limiting this action" * rate_hz
# If this limit has not been exceeded, wipe our record of this action
time_delta = time_now_s - time_start
if action_count - time_delta * rate_hz > 0:
continue
else:
del self.actions[key]

def ratelimit(
self,
key: Any,
rate_hz: Optional[float] = None,
burst_count: Optional[int] = None,
update: bool = True,
_time_now_s: Optional[int] = None,
):
"""Checks if an action can be performed. If not, raises a LimitExceededError

Args:
key: An arbitrary key used to classify an action
rate_hz: The long term number of actions that can be performed in a second.
Overrides the value set during instantiation if set.
burst_count: How many actions can be performed before being limited.
Overrides the value set during instantiation if set.
update: Whether to count this check as performing the action
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.

Raises:
LimitExceededError: If an action could not be performed, along with the time in
milliseconds until the action can be performed again
"""
time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()

def ratelimit(self, key, time_now_s, rate_hz, burst_count, update=True):
allowed, time_allowed = self.can_do_action(
key, time_now_s, rate_hz, burst_count, update
key,
rate_hz=rate_hz,
burst_count=burst_count,
update=update,
_time_now_s=time_now_s,
)

if not allowed:
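The arithmetic in `can_do_action` is a leaky-bucket variant: the stored `action_count` decays at `rate_hz` per second, and a further action is denied once the decayed count exceeds `burst_count - 1`. A self-contained walkthrough with illustrative numbers:

```python
# Illustrative parameters: roughly one action per 6 seconds, bursts of 3.
rate_hz, burst_count = 0.17, 3.0

action_count, time_start = 0.0, 0.0
for t in (0.0, 1.0, 2.0, 3.0, 30.0):
    performed = action_count - (t - time_start) * rate_hz  # decayed count
    if performed < 0:
        # Bucket fully drained: allow and reset the window.
        allowed, time_start, action_count = True, t, 1.0
    elif performed > burst_count - 1.0:
        allowed = False        # burst exhausted
    else:
        allowed = True
        action_count += 1.0
    print("t=%.0fs allowed=%s" % (t, allowed))
# The first three actions pass, the fourth (t=3s) is denied, and by t=30s
# the count has decayed enough to allow again.
```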
@@ -17,8 +17,7 @@
"""Contains the URL paths to prefix various aspects of the server with. """
import hmac
from hashlib import sha256

from six.moves.urllib.parse import urlencode
from urllib.parse import urlencode

from synapse.config import ConfigError
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import os
@@ -20,8 +19,8 @@ import signal
import socket
import sys
import traceback
from typing import Iterable

from daemonize import Daemonize
from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
@@ -29,9 +28,11 @@ from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.config.server import ListenerConfig
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

@@ -127,17 +128,8 @@ def start_reactor(
if print_pidfile:
print(pid_file)

daemon = Daemonize(
app=appname,
pid=pid_file,
action=run,
auto_close_fds=False,
verbose=True,
logger=logger,
)
daemon.start()
else:
run()
daemonize_process(pid_file, logger)
run()


def quit_with_error(error_string: str) -> NoReturn:
@@ -234,7 +226,7 @@ def refresh_certificate(hs):
logger.info("Context factories updated.")


def start(hs, listeners=None):
def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
"""
Start a Synapse server or worker.

@@ -245,8 +237,8 @@ def start(hs, listeners=None):
notify systemd.

Args:
hs (synapse.server.HomeServer)
listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
hs: homeserver instance
listeners: Listener configuration ('listeners' in homeserver.yaml)
"""
try:
# Set up the SIGHUP machinery.
@@ -276,7 +268,7 @@ def start(hs, listeners=None):

# It is now safe to start your Synapse.
hs.start_listening(listeners)
hs.get_datastore().db.start_profiling()
hs.get_datastore().db_pool.start_profiling()
hs.get_pusherpool().start()

setup_sentry(hs)
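The hunk above swaps the third-party `Daemonize` class for an in-tree `daemonize_process` helper. A rough sketch of what such a helper typically does (the real `synapse.util.daemonize` is considerably more careful about file descriptors, umask, and error reporting):

```python
import os
import sys

def daemonize_process(pid_file: str, logger) -> None:
    # Classic double-fork: detach from the controlling terminal, then make
    # sure the daemon can never reacquire one.
    if os.fork() > 0:
        sys.exit(0)  # first parent exits
    os.setsid()
    if os.fork() > 0:
        sys.exit(0)  # second parent exits; the grandchild is the daemon
    with open(pid_file, "w") as f:
        f.write(str(os.getpid()))
    logger.info("Daemonized with pid %d", os.getpid())
```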
@@ -21,7 +21,7 @@ from typing import Dict, Iterable, Optional, Set

from typing_extensions import ContextManager

from twisted.internet import defer, reactor
from twisted.internet import address, reactor

import synapse
import synapse.events
@@ -37,6 +37,7 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation import send_queue
from synapse.federation.transport.server import TransportLayerServer
from synapse.handlers.presence import (
@@ -86,7 +87,6 @@ from synapse.replication.tcp.streams import (
ReceiptsStream,
TagAccountDataStream,
ToDeviceStream,
TypingStream,
)
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events
@@ -110,6 +110,7 @@ from synapse.rest.client.v1.room import (
RoomSendEventRestServlet,
RoomStateEventRestServlet,
RoomStateRestServlet,
RoomTypingRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import groups, sync, user_directory
@@ -122,17 +123,18 @@ from synapse.rest.client.v2_alpha.account_data import (
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.censor_events import CensorEventsStore
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
from synapse.storage.data_stores.main.monthly_active_users import (
from synapse.server import HomeServer, cache_in_self
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.storage.data_stores.main.search import SearchWorkerStore
from synapse.storage.data_stores.main.ui_auth import UIAuthWorkerStore
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.databases.main.presence import UserPresenceState
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
@@ -205,10 +207,30 @@ class KeyUploadServlet(RestServlet):

if body:
# They're actually trying to upload something, proxy to main synapse.
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
headers = {"Authorization": auth_headers}

# Proxy headers from the original request, such as the auth headers
# (in case the access token is there) and the original IP /
# User-Agent of the request.
headers = {
header: request.requestHeaders.getRawHeaders(header, [])
for header in (b"Authorization", b"User-Agent")
}
# Add the previous hop to the X-Forwarded-For header.
x_forwarded_for = request.requestHeaders.getRawHeaders(
b"X-Forwarded-For", []
)
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
previous_host = request.client.host.encode("ascii")
# If the header exists, add to the comma-separated list of the first
# instance of the header. Otherwise, generate a new header.
if x_forwarded_for:
x_forwarded_for = [
x_forwarded_for[0] + b", " + previous_host
] + x_forwarded_for[1:]
else:
x_forwarded_for = [previous_host]
headers[b"X-Forwarded-For"] = x_forwarded_for

try:
result = await self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
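The header-proxying logic above can be exercised in isolation. A sketch of just the `X-Forwarded-For` append step, with `existing` standing in for `request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])`:

```python
def add_forwarded_hop(existing: list, client_ip: str) -> list:
    previous_host = client_ip.encode("ascii")
    if existing:
        # Append to the comma-separated list in the first header instance;
        # any further instances are passed through untouched.
        return [existing[0] + b", " + previous_host] + existing[1:]
    # No header yet: start a new one.
    return [previous_host]

assert add_forwarded_hop([], "10.0.0.1") == [b"10.0.0.1"]
assert add_forwarded_hop([b"1.2.3.4"], "10.0.0.1") == [b"1.2.3.4, 10.0.0.1"]
```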
@@ -353,9 +375,8 @@ class GenericWorkerPresence(BasePresenceHandler):

return _user_syncing()

@defer.inlineCallbacks
def notify_from_replication(self, states, stream_id):
parties = yield get_interested_parties(self.store, states)
async def notify_from_replication(self, states, stream_id):
parties = await get_interested_parties(self.store, states)
room_ids_to_states, users_to_states = parties

self.notifier.on_new_event(
@@ -365,8 +386,7 @@ class GenericWorkerPresence(BasePresenceHandler):
users=users_to_states.keys(),
)

@defer.inlineCallbacks
def process_replication_rows(self, token, rows):
async def process_replication_rows(self, token, rows):
states = [
UserPresenceState(
row.user_id,
@@ -384,7 +404,7 @@ class GenericWorkerPresence(BasePresenceHandler):
self.user_to_current_state[state.user_id] = state

stream_id = token
yield self.notify_from_replication(states, stream_id)
await self.notify_from_replication(states, stream_id)

def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
return [
@@ -430,37 +450,6 @@ class GenericWorkerPresence(BasePresenceHandler):
await self._bump_active_client(user_id=user_id)


class GenericWorkerTyping(object):
def __init__(self, hs):
self._latest_room_serial = 0
self._reset()

def _reset(self):
"""
Reset the typing handler's data caches.
"""
# map room IDs to serial numbers
self._room_serials = {}
# map room IDs to sets of users currently typing
self._room_typing = {}

def process_replication_rows(self, token, rows):
if self._latest_room_serial > token:
# The master has gone backwards. To prevent inconsistent data, just
# clear everything.
self._reset()

# Set the latest serial token to whatever the server gave us.
self._latest_room_serial = token

for row in rows:
self._room_serials[row.room_id] = token
self._room_typing[row.room_id] = row.user_ids

def get_current_token(self) -> int:
return self._latest_room_serial


class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
@@ -490,37 +479,27 @@ class GenericWorkerSlavedStore(
SearchWorkerStore,
BaseSlavedStore,
):
def __init__(self, database, db_conn, hs):
super(GenericWorkerSlavedStore, self).__init__(database, db_conn, hs)

# We pull out the current federation stream position now so that we
# always have a known value for the federation position in memory so
# that we don't have to bounce via a deferred once when we start the
# replication streams.
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

def _get_federation_out_pos(self, db_conn):
sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
sql = self.database_engine.convert_param_style(sql)

txn = db_conn.cursor()
txn.execute(sql, ("federation",))
rows = txn.fetchall()
txn.close()

return rows[0][0] if rows else -1
pass
class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore

def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
def _listen_http(self, listener_config: ListenerConfig):
port = listener_config.port
bind_addresses = listener_config.bind_addresses

assert listener_config.http_options is not None

site_tag = listener_config.http_options.tag
if site_tag is None:
site_tag = port

# We always include a health resource.
resources = {"/health": HealthResource()}

for res in listener_config.http_options.resources:
for name in res.names:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
@@ -550,6 +529,7 @@ class GenericWorkerServer(HomeServer):
KeyUploadServlet(self).register(resource)
AccountDataServlet(self).register(resource)
RoomAccountDataServlet(self).register(resource)
RoomTypingRestServlet(self).register(resource)

sync.register_servlets(self, resource)
events.register_servlets(self, resource)
@@ -590,7 +570,7 @@ class GenericWorkerServer(HomeServer):
" repository is disabled. Ignoring."
)

if name == "openid" and "federation" not in res["names"]:
if name == "openid" and "federation" not in res.names:
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
@@ -625,19 +605,19 @@ class GenericWorkerServer(HomeServer):

logger.info("Synapse worker now listening on port %d", port)

def start_listening(self, listeners):
def start_listening(self, listeners: Iterable[ListenerConfig]):
for listener in listeners:
if listener["type"] == "http":
if listener.type == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
elif listener.type == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
listener.bind_addresses,
listener.port,
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
elif listener.type == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
@@ -646,31 +626,29 @@ class GenericWorkerServer(HomeServer):
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
_base.listen_metrics(listener.bind_addresses, listener.port)
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warning("Unsupported listener type: %s", listener.type)

self.get_tcp_replication().start_replication(self)

def remove_pusher(self, app_id, push_key, user_id):
async def remove_pusher(self, app_id, push_key, user_id):
self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

def build_replication_data_handler(self):
@cache_in_self
def get_replication_data_handler(self):
return GenericWorkerReplicationHandler(self)

def build_presence_handler(self):
@cache_in_self
def get_presence_handler(self):
return GenericWorkerPresence(self)

def build_typing_handler(self):
return GenericWorkerTyping(self)
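The `build_*` methods above become `@cache_in_self`-decorated `get_*` methods, folding the builder/getter split into one. A minimal sketch of what a `cache_in_self`-style decorator is assumed to do (the real one in `synapse.server` also guards against re-entrant builds):

```python
import functools

def cache_in_self(builder):
    attr = "_" + builder.__name__

    @functools.wraps(builder)
    def getter(self):
        # Build once per HomeServer instance, then return the cached object.
        if not hasattr(self, attr):
            setattr(self, attr, builder(self))
        return getattr(self, attr)

    return getter
```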
class GenericWorkerReplicationHandler(ReplicationDataHandler):
def __init__(self, hs):
super(GenericWorkerReplicationHandler, self).__init__(hs)

self.store = hs.get_datastore()
self.typing_handler = hs.get_typing_handler()
self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence
self.notifier = hs.get_notifier()

@@ -707,11 +685,6 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
await self.pusher_pool.on_new_receipts(
token, token, {row.room_id for row in rows}
)
elif stream_name == TypingStream.NAME:
self.typing_handler.process_replication_rows(token, rows)
self.notifier.on_new_event(
"typing_key", token, rooms=[row.room_id for row in rows]
)
elif stream_name == ToDeviceStream.NAME:
entities = [row.entity for row in rows if row.entity.startswith("@")]
if entities:
@@ -738,6 +711,11 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
except Exception:
logger.exception("Error processing replication")

async def on_position(self, stream_name: str, instance_name: str, token: int):
await super().on_position(stream_name, instance_name, token)
# Also call on_rdata to ensure that stream positions are properly reset.
await self.on_rdata(stream_name, instance_name, token, [])

def stop_pusher(self, user_id, app_id, pushkey):
if not self.notify_pushers:
return
@@ -781,19 +759,11 @@ class FederationSenderHandler(object):
self.federation_sender = hs.get_federation_sender()
self._hs = hs

# if the worker is restarted, we want to pick up where we left off in
# the replication stream, so load the position from the database.
#
# XXX is this actually worthwhile? Whenever the master is restarted, we'll
# drop some rows anyway (which is mostly fine because we're only dropping
# typing and presence notifications). If the replication stream is
# unreliable, why do we do all this hoop-jumping to store the position in the
# database? See also https://github.com/matrix-org/synapse/issues/7535.
#
self.federation_position = self.store.federation_out_pos_startup
# Stores the latest position in the federation stream we've gotten up
# to. This is always set before we use it.
self.federation_position = None

self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
self._last_ack = self.federation_position

def on_start(self):
# There may be some events that are persisted but haven't been sent,
@@ -863,9 +833,24 @@ class FederationSenderHandler(object):
a FEDERATION_ACK back to the master, and stores the token that we have processed
in `federation_stream_position` so that we can restart where we left off.
"""
try:
self.federation_position = token
self.federation_position = token

# We save and send the ACK to master asynchronously, so we don't block
# processing on persistence. We don't need to do this operation for
# every single RDATA we receive, we just need to do it periodically.

if self._fed_position_linearizer.is_queued(None):
# There is already a task queued up to save and send the token, so
# no need to queue up another task.
return

run_as_background_process("_save_and_send_ack", self._save_and_send_ack)

async def _save_and_send_ack(self):
"""Save the current federation position in the database and send an ACK
to master with where we're up to.
"""
try:
# We linearize here to ensure we don't have races updating the token
#
# XXX this appears to be redundant, since the ReplicationCommandHandler
@@ -875,16 +860,17 @@ class FederationSenderHandler(object):
# we're not being re-entered?

with (await self._fed_position_linearizer.queue(None)):
# We persist and ack the same position, so we take a copy of it
# here as otherwise it can get modified from underneath us.
current_position = self.federation_position

await self.store.update_federation_out_pos(
"federation", self.federation_position
"federation", current_position
)

# We ACK this token over replication so that the master can drop
# its in memory queues
self._hs.get_tcp_replication().send_federation_ack(
self.federation_position
)
self._last_ack = self.federation_position
self._hs.get_tcp_replication().send_federation_ack(current_position)
except Exception:
logger.exception("Error updating federation stream position")

@@ -912,7 +898,7 @@ def start(config_options):
)

if config.worker_app == "synapse.app.appservice":
if config.notify_appservices:
if config.appservice.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
@@ -922,13 +908,13 @@ def start(config_options):
sys.exit(1)

# Force the appservice to start since they will be disabled in the main config
config.notify_appservices = True
config.appservice.notify_appservices = True
else:
# For other worker types we force this to off.
config.notify_appservices = False
config.appservice.notify_appservices = False

if config.worker_app == "synapse.app.pusher":
if config.start_pushers:
if config.server.start_pushers:
sys.stderr.write(
"\nThe pushers must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
@@ -938,13 +924,13 @@ def start(config_options):
sys.exit(1)

# Force the pushers to start since they will be disabled in the main config
config.start_pushers = True
config.server.start_pushers = True
else:
# For other worker types we force this to off.
config.start_pushers = False
config.server.start_pushers = False

if config.worker_app == "synapse.app.user_dir":
if config.update_user_directory:
if config.server.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
@@ -954,13 +940,13 @@ def start(config_options):
sys.exit(1)

# Force the user directory to start since it will be disabled in the main config
config.update_user_directory = True
config.server.update_user_directory = True
else:
# For other worker types we force this to off.
config.update_user_directory = False
config.server.update_user_directory = False

if config.worker_app == "synapse.app.federation_sender":
if config.send_federation:
if config.worker.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
@@ -970,10 +956,10 @@ def start(config_options):
sys.exit(1)

# Force the federation sender to start since it will be disabled in the main config
config.send_federation = True
config.worker.send_federation = True
else:
# For other worker types we force this to off.
config.send_federation = False
config.worker.send_federation = False

synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
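The four near-identical blocks above share one shape: a feature must be off in the main process's config, and is then force-enabled only in the worker that owns it. A generic sketch of that pattern (`config` here is a hypothetical object with boolean feature flags, not Synapse's actual config class):

```python
def apply_worker_override(config, worker_app: str, owner_app: str, flag: str):
    if worker_app == owner_app:
        if getattr(config, flag):
            raise SystemExit(
                "%s must be disabled in the main synapse process before it "
                "can be run in a separate worker." % flag
            )
        # Force the feature on: the shared config deliberately disables it.
        setattr(config, flag, True)
    else:
        # Any other worker type must not run this feature.
        setattr(config, flag, False)
```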
@@ -23,8 +23,7 @@ import math
import os
import resource
import sys

from six import iteritems
from typing import Iterable

from prometheus_client import Gauge

@@ -50,12 +49,14 @@ from synapse.app import _base
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import (
OptionsResource,
RootOptionsRedirectResource,
RootRedirect,
StaticResource,
)
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
@@ -67,6 +68,7 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.well_known import WellKnownResource
from synapse.server import HomeServer
@@ -89,24 +91,26 @@ def gz_wrap(r):
class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore

def _listener_http(self, config, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
tls = listener_config.get("tls", False)
site_tag = listener_config.get("tag", port)
def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConfig):
port = listener_config.port
bind_addresses = listener_config.bind_addresses
tls = listener_config.tls
site_tag = listener_config.http_options.tag
if site_tag is None:
site_tag = port

resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "openid" and "federation" in res["names"]:
# We always include a health resource.
resources = {"/health": HealthResource()}

for res in listener_config.http_options.resources:
for name in res.names:
if name == "openid" and "federation" in res.names:
# Skip loading openid resource if federation is defined
# since federation resource will include openid
continue
resources.update(
self._configure_named_resource(name, res.get("compress", False))
)
resources.update(self._configure_named_resource(name, res.compress))

additional_resources = listener_config.get("additional_resources", {})
additional_resources = listener_config.http_options.additional_resources
logger.debug("Configuring additional resources: %r", additional_resources)
module_api = ModuleApi(self, self.get_auth_handler())
for path, resmodule in additional_resources.items():
@@ -228,7 +232,7 @@ class SynapseHomeServer(HomeServer):
if name in ["static", "client"]:
resources.update(
{
STATIC_PREFIX: File(
STATIC_PREFIX: StaticResource(
os.path.join(os.path.dirname(synapse.__file__), "static")
)
}
@@ -278,7 +282,7 @@ class SynapseHomeServer(HomeServer):

return resources

def start_listening(self, listeners):
def start_listening(self, listeners: Iterable[ListenerConfig]):
config = self.get_config()

if config.redis_enabled:
@@ -288,25 +292,25 @@ class SynapseHomeServer(HomeServer):
self.get_tcp_replication().start_replication(self)

for listener in listeners:
if listener["type"] == "http":
if listener.type == "http":
self._listening_services.extend(self._listener_http(config, listener))
elif listener["type"] == "manhole":
elif listener.type == "manhole":
listen_tcp(
listener["bind_addresses"],
listener["port"],
listener.bind_addresses,
listener.port,
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "replication":
elif listener.type == "replication":
services = listen_tcp(
listener["bind_addresses"],
listener["port"],
listener.bind_addresses,
listener.port,
ReplicationStreamProtocolFactory(self),
)
for s in services:
reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
elif listener["type"] == "metrics":
elif listener.type == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
@@ -315,9 +319,11 @@ class SynapseHomeServer(HomeServer):
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
_base.listen_metrics(listener.bind_addresses, listener.port)
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
# this shouldn't happen, as the listener type should have been checked
# during parsing
logger.warning("Unrecognized listener type: %s", listener.type)


# Gauges to expose monthly active user control metrics
@@ -377,13 +383,12 @@ def setup(config_options):

hs.setup_master()

@defer.inlineCallbacks
def do_acme():
async def do_acme() -> bool:
"""
Reprovision an ACME certificate, if it's required.

Returns:
Deferred[bool]: Whether the cert has been updated.
Whether the cert has been updated.
"""
acme = hs.get_acme_handler()

@@ -402,7 +407,7 @@ def setup(config_options):
provision = True

if provision:
yield acme.provision_certificate()
await acme.provision_certificate()

return provision

@@ -412,7 +417,7 @@ def setup(config_options):
Provision a certificate from ACME, if required, and reload the TLS
certificate if it's renewed.
"""
reprovisioned = yield do_acme()
reprovisioned = yield defer.ensureDeferred(do_acme())
if reprovisioned:
_base.refresh_certificate(hs)

@@ -424,8 +429,8 @@ def setup(config_options):
acme = hs.get_acme_handler()
# Start up the webservices which we will respond to ACME
# challenges with, and then provision.
yield acme.start_listening()
yield do_acme()
yield defer.ensureDeferred(acme.start_listening())
yield defer.ensureDeferred(do_acme())

# Check if it needs to be reprovisioned every day.
hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)
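Note how the ACME hunks bridge the two coroutine styles during the migration: an `@defer.inlineCallbacks` generator cannot `yield` a native coroutine directly, so `defer.ensureDeferred` wraps it in a Deferred first. A minimal sketch:

```python
from twisted.internet import defer

async def do_acme() -> bool:
    ...  # native coroutine, as in the diff above

@defer.inlineCallbacks
def reprovision():
    # yield-ing do_acme() directly would hand inlineCallbacks a coroutine
    # object it does not understand; ensureDeferred converts it first.
    reprovisioned = yield defer.ensureDeferred(do_acme())
    return reprovisioned
```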
@@ -439,7 +444,7 @@ def setup(config_options):

_base.start(hs, config.listeners)

hs.get_datastore().db.updates.start_doing_background_updates()
hs.get_datastore().db_pool.updates.start_doing_background_updates()
except Exception:
# Print the exception and bail out.
print("Error during startup:", file=sys.stderr)
@@ -480,50 +485,15 @@ class SynapseService(service.Service):
_stats_process = []


@defer.inlineCallbacks
def phone_stats_home(hs, stats, stats_process=_stats_process):
async def phone_stats_home(hs, stats, stats_process=_stats_process):
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - hs.start_time)
if uptime < 0:
uptime = 0

stats["homeserver"] = hs.config.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = yield hs.get_datastore().count_all_users()

total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users

daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count

room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count

stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

r30_results = yield hs.get_datastore().count_r30_users()
for name, count in iteritems(r30_results):
stats["r30_users_" + name] = count

daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = hs.config.caches.global_factor
stats["event_cache_size"] = hs.config.caches.event_cache_size

#
# Performance statistics
# Performance statistics. Keep this early in the function to maintain reliability of the `test_performance_100` test.
#
old = stats_process[0]
new = (now, resource.getrusage(resource.RUSAGE_SELF))
@@ -541,17 +511,55 @@ def phone_stats_home(hs, stats, stats_process=_stats_process):
else:
stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)

#
# General statistics
#

stats["homeserver"] = hs.config.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = await hs.get_datastore().count_all_users()

total_nonbridged_users = await hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users

daily_user_type_results = await hs.get_datastore().count_daily_user_type()
for name, count in daily_user_type_results.items():
stats["daily_user_type_" + name] = count

room_count = await hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count

stats["daily_active_users"] = await hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users()
stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = await hs.get_datastore().count_daily_messages()

r30_results = await hs.get_datastore().count_r30_users()
for name, count in r30_results.items():
stats["r30_users_" + name] = count

daily_sent_messages = await hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = hs.config.caches.global_factor
stats["event_cache_size"] = hs.config.caches.event_cache_size

#
# Database version
#

# This only reports info about the *main* database.
stats["database_engine"] = hs.get_datastore().db.engine.module.__name__
stats["database_server_version"] = hs.get_datastore().db.engine.server_version
stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__
stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version

logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
try:
yield hs.get_proxied_http_client().put_json(
await hs.get_proxied_http_client().put_json(
hs.config.report_stats_endpoint, stats
)
except Exception as e:
@@ -617,18 +625,17 @@ def run(hs):
clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
reap_monthly_active_users()

@defer.inlineCallbacks
def generate_monthly_active_users():
async def generate_monthly_active_users():
current_mau_count = 0
current_mau_count_by_service = {}
reserved_users = ()
store = hs.get_datastore()
if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
current_mau_count = yield store.get_monthly_active_count()
current_mau_count = await store.get_monthly_active_count()
current_mau_count_by_service = (
yield store.get_monthly_active_count_by_service()
await store.get_monthly_active_count_by_service()
)
reserved_users = yield store.get_registered_reserved_users()
reserved_users = await store.get_registered_reserved_users()
current_mau_gauge.set(float(current_mau_count))

for app_service, count in current_mau_count_by_service.items():
@@ -15,13 +15,9 @@
import logging
import re

from six import string_types

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.types import GroupID, get_domain_from_id
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.caches.descriptors import cached

logger = logging.getLogger(__name__)

@@ -45,7 +41,7 @@ class AppServiceTransaction(object):
Args:
as_api(ApplicationServiceApi): The API to use to send.
Returns:
A Deferred which resolves to True if the transaction was sent.
An Awaitable which resolves to True if the transaction was sent.
"""
return as_api.push_bulk(
service=self.service, events=self.events, txn_id=self.id
@@ -156,7 +152,7 @@ class ApplicationService(object):
)

regex = regex_obj.get("regex")
if isinstance(regex, string_types):
if isinstance(regex, str):
regex_obj["regex"] = re.compile(regex) # Pre-compile regex
else:
raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
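Pre-compiling the namespace regex (as above) means each incoming event is matched against a compiled pattern rather than recompiling per check. A small illustration with a made-up namespace pattern:

```python
import re

# Hypothetical appservice user namespace: all IRC-bridged users.
USERS_NS = re.compile(r"@irc_.*:example\.org")

def is_interested_in_user(user_id: str) -> bool:
    return bool(USERS_NS.match(user_id))

assert is_interested_in_user("@irc_alice:example.org")
assert not is_interested_in_user("@bob:example.org")
```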
@@ -174,8 +170,7 @@ class ApplicationService(object):
return regex_obj["exclusive"]
return False

@defer.inlineCallbacks
def _matches_user(self, event, store):
async def _matches_user(self, event, store):
if not event:
return False

@@ -190,12 +185,12 @@ class ApplicationService(object):
if not store:
return False

does_match = yield self._matches_user_in_member_list(event.room_id, store)
does_match = await self._matches_user_in_member_list(event.room_id, store)
return does_match

@cachedInlineCallbacks(num_args=1, cache_context=True)
def _matches_user_in_member_list(self, room_id, store, cache_context):
member_list = yield store.get_users_in_room(
@cached(num_args=1, cache_context=True)
async def _matches_user_in_member_list(self, room_id, store, cache_context):
member_list = await store.get_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)

@@ -210,35 +205,33 @@ class ApplicationService(object):
return self.is_interested_in_room(event.room_id)
return False

@defer.inlineCallbacks
def _matches_aliases(self, event, store):
async def _matches_aliases(self, event, store):
if not store or not event:
return False

alias_list = yield store.get_aliases_for_room(event.room_id)
alias_list = await store.get_aliases_for_room(event.room_id)
for alias in alias_list:
if self.is_interested_in_alias(alias):
return True
return False

@defer.inlineCallbacks
def is_interested(self, event, store=None):
async def is_interested(self, event, store=None) -> bool:
"""Check if this service is interested in this event.

Args:
event(Event): The event to check.
store(DataStore)
Returns:
bool: True if this service would like to know about this event.
True if this service would like to know about this event.
"""
# Do cheap checks first
if self._matches_room_id(event):
return True

if (yield self._matches_aliases(event, store)):
if await self._matches_aliases(event, store):
return True

if (yield self._matches_user(event, store)):
if await self._matches_user(event, store):
return True

return False
@@ -13,14 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from six.moves import urllib
import urllib

from prometheus_client import Counter

from twisted.internet import defer

from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.constants import EventTypes, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.events.utils import serialize_event
from synapse.http.client import SimpleHttpClient
@@ -94,14 +93,12 @@ class ApplicationServiceApi(SimpleHttpClient):
hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
)

@defer.inlineCallbacks
def query_user(self, service, user_id):
async def query_user(self, service, user_id):
if service.url is None:
return False
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
response = None
try:
response = yield self.get_json(uri, {"access_token": service.hs_token})
response = await self.get_json(uri, {"access_token": service.hs_token})
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -112,14 +109,12 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("query_user to %s threw exception %s", uri, ex)
return False

@defer.inlineCallbacks
def query_alias(self, service, alias):
async def query_alias(self, service, alias):
if service.url is None:
return False
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
response = None
try:
response = yield self.get_json(uri, {"access_token": service.hs_token})
response = await self.get_json(uri, {"access_token": service.hs_token})
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -130,8 +125,7 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False

@defer.inlineCallbacks
def query_3pe(self, service, kind, protocol, fields):
async def query_3pe(self, service, kind, protocol, fields):
if kind == ThirdPartyEntityKind.USER:
required_field = "userid"
elif kind == ThirdPartyEntityKind.LOCATION:
@@ -148,7 +142,7 @@ class ApplicationServiceApi(SimpleHttpClient):
urllib.parse.quote(protocol),
)
try:
response = yield self.get_json(uri, fields)
response = await self.get_json(uri, fields)
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r", uri, response
@@ -181,7 +175,7 @@ class ApplicationServiceApi(SimpleHttpClient):
urllib.parse.quote(protocol),
)
try:
info = yield self.get_json(uri, {})
info = yield defer.ensureDeferred(self.get_json(uri, {}))

if not _is_valid_3pe_metadata(info):
logger.warning(
@@ -204,12 +198,11 @@ class ApplicationServiceApi(SimpleHttpClient):
key = (service.id, protocol)
return self.protocol_meta_cache.wrap(key, _get)

@defer.inlineCallbacks
def push_bulk(self, service, events, txn_id=None):
async def push_bulk(self, service, events, txn_id=None):
if service.url is None:
return True

events = self._serialize(events)
events = self._serialize(service, events)

if txn_id is None:
logger.warning(
@@ -220,7 +213,7 @@ class ApplicationServiceApi(SimpleHttpClient):

uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id))
try:
yield self.put_json(
await self.put_json(
uri=uri,
json_body={"events": events},
args={"access_token": service.hs_token},
@@ -235,6 +228,18 @@ class ApplicationServiceApi(SimpleHttpClient):
failed_transactions_counter.labels(service.id).inc()
return False

def _serialize(self, events):
def _serialize(self, service, events):
time_now = self.clock.time_msec()
return [serialize_event(e, time_now, as_client_event=True) for e in events]
return [
serialize_event(
e,
time_now,
as_client_event=True,
is_invite=(
e.type == EventTypes.Member
and e.membership == "invite"
and service.is_interested_in_user(e.state_key)
),
)
for e in events
]
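The new `_serialize` signature exists so each event can be flagged as an invite for this particular service. The added condition, pulled out for clarity (`EventTypes.Member` is `"m.room.member"` in the Matrix spec):

```python
def _is_invite_for(service, e) -> bool:
    # An invite, from this service's point of view, is a membership event
    # inviting a user that falls inside the service's user namespace.
    return (
        e.type == "m.room.member"
        and e.membership == "invite"
        and service.is_interested_in_user(e.state_key)
    )
```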
@@ -50,8 +50,6 @@ components.
|
||||
"""
|
||||
import logging
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.appservice import ApplicationServiceState
|
||||
from synapse.logging.context import run_in_background
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
@@ -73,12 +71,11 @@ class ApplicationServiceScheduler(object):
|
||||
self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
|
||||
self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def start(self):
|
||||
async def start(self):
|
||||
logger.info("Starting appservice scheduler")
|
||||
|
||||
# check for any DOWN ASes and start recoverers for them.
|
||||
services = yield self.store.get_appservices_by_state(
|
||||
services = await self.store.get_appservices_by_state(
|
||||
ApplicationServiceState.DOWN
|
||||
)
|
||||
|
||||
@@ -117,8 +114,7 @@ class _ServiceQueuer(object):
|
||||
"as-sender-%s" % (service.id,), self._send_request, service
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _send_request(self, service):
|
||||
async def _send_request(self, service):
|
||||
# sanity-check: we shouldn't get here if this service already has a sender
|
||||
# running.
|
||||
assert service.id not in self.requests_in_flight
|
||||
@@ -130,7 +126,7 @@ class _ServiceQueuer(object):
|
||||
if not events:
|
||||
return
|
||||
try:
|
||||
yield self.txn_ctrl.send(service, events)
|
||||
await self.txn_ctrl.send(service, events)
|
||||
except Exception:
|
||||
logger.exception("AS request failed")
|
||||
finally:
|
||||
@@ -162,36 +158,33 @@ class _TransactionController(object):
|
||||
# for UTs
|
||||
self.RECOVERER_CLASS = _Recoverer
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send(self, service, events):
|
||||
async def send(self, service, events):
|
||||
try:
|
||||
txn = yield self.store.create_appservice_txn(service=service, events=events)
|
||||
service_is_up = yield self._is_service_up(service)
|
||||
txn = await self.store.create_appservice_txn(service=service, events=events)
|
||||
service_is_up = await self._is_service_up(service)
|
||||
if service_is_up:
|
||||
sent = yield txn.send(self.as_api)
|
||||
sent = await txn.send(self.as_api)
|
||||
if sent:
|
||||
yield txn.complete(self.store)
|
||||
await txn.complete(self.store)
|
||||
else:
|
||||
run_in_background(self._on_txn_fail, service)
|
||||
except Exception:
|
||||
logger.exception("Error creating appservice transaction")
|
||||
run_in_background(self._on_txn_fail, service)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_recovered(self, recoverer):
|
||||
async def on_recovered(self, recoverer):
|
||||
logger.info(
|
||||
"Successfully recovered application service AS ID %s", recoverer.service.id
|
||||
)
|
||||
self.recoverers.pop(recoverer.service.id)
|
||||
logger.info("Remaining active recoverers: %s", len(self.recoverers))
|
||||
yield self.store.set_appservice_state(
|
||||
await self.store.set_appservice_state(
|
||||
recoverer.service, ApplicationServiceState.UP
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _on_txn_fail(self, service):
|
||||
async def _on_txn_fail(self, service):
|
||||
try:
|
||||
yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
|
||||
await self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
|
||||
self.start_recoverer(service)
|
||||
except Exception:
|
||||
logger.exception("Error starting AS recoverer")
|
||||
@@ -211,9 +204,8 @@ class _TransactionController(object):
|
||||
recoverer.recover()
|
||||
logger.info("Now %i active recoverers", len(self.recoverers))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _is_service_up(self, service):
|
||||
state = yield self.store.get_appservice_state(service)
|
||||
async def _is_service_up(self, service):
|
||||
state = await self.store.get_appservice_state(service)
|
||||
return state == ApplicationServiceState.UP or state is None
|
||||
|
||||
|
||||
@@ -254,25 +246,24 @@ class _Recoverer(object):
             self.backoff_counter += 1
         self.recover()
 
-    @defer.inlineCallbacks
-    def retry(self):
+    async def retry(self):
         logger.info("Starting retries on %s", self.service.id)
         try:
             while True:
-                txn = yield self.store.get_oldest_unsent_txn(self.service)
+                txn = await self.store.get_oldest_unsent_txn(self.service)
                 if not txn:
                     # nothing left: we're done!
-                    self.callback(self)
+                    await self.callback(self)
                     return
 
                 logger.info(
                     "Retrying transaction %s for AS ID %s", txn.id, txn.service.id
                 )
-                sent = yield txn.send(self.as_api)
+                sent = await txn.send(self.as_api)
                 if not sent:
                     break
 
-                yield txn.complete(self.store)
+                await txn.complete(self.store)
 
                 # reset the backoff counter and then process the next transaction
                 self.backoff_counter = 1
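
For context, the code that schedules the next retry sits outside this hunk, so the exact delay formula is an assumption inferred from the backoff_counter name: each failed attempt bumps the counter, giving an exponential backoff along these lines:

    # Hypothetical doubling schedule driven by backoff_counter:
    for backoff_counter in range(1, 6):
        print("retry %d after %d seconds" % (backoff_counter, 2 ** backoff_counter))
    # retry 1 after 2 seconds
    # retry 2 after 4 seconds
    # ... and so on, until a transaction is sent and the counter resets to 1.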
@@ -16,6 +16,7 @@ from synapse.config._base import ConfigError
 
 if __name__ == "__main__":
     import sys
+
     from synapse.config.homeserver import HomeServerConfig
 
     action = sys.argv[1]
@@ -18,12 +18,16 @@
 import argparse
 import errno
 import os
+import time
+import urllib.parse
 from collections import OrderedDict
 from hashlib import sha256
 from textwrap import dedent
-from typing import Any, MutableMapping, Optional
-
-from six import integer_types
+from typing import Any, Callable, List, MutableMapping, Optional
 
 import attr
+import jinja2
+import pkg_resources
 import yaml
 
@@ -100,6 +104,11 @@ class Config(object):
     def __init__(self, root_config=None):
         self.root = root_config
 
+        # Get the path to the default Synapse template directory
+        self.default_template_dir = pkg_resources.resource_filename(
+            "synapse", "res/templates"
+        )
+
     def __getattr__(self, item: str) -> Any:
         """
         Try and fetch a configuration option that does not exist on this class.
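
pkg_resources.resource_filename resolves a path inside an installed package, so every Config instance now knows where Synapse's bundled templates live without any configuration. For example:

    import pkg_resources

    # Resolves to the bundled templates of the installed synapse package,
    # e.g. ".../site-packages/synapse/res/templates" (exact path varies by install).
    print(pkg_resources.resource_filename("synapse", "res/templates"))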
@@ -117,7 +126,7 @@ class Config(object):
 
     @staticmethod
     def parse_size(value):
-        if isinstance(value, integer_types):
+        if isinstance(value, int):
             return value
         sizes = {"K": 1024, "M": 1024 * 1024}
         size = 1
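
With six gone, the isinstance check uses the builtin int directly; on Python 3, six.integer_types is just (int,), so behaviour is unchanged. Given the sizes table above, usage looks roughly like this (the suffix handling itself is outside the hunk, so the string examples assume the obvious "number plus K/M suffix" format):

    Config.parse_size(4096)   # ints pass straight through -> 4096
    Config.parse_size("10K")  # assumed: 10 * 1024 -> 10240
    Config.parse_size("10M")  # assumed: 10 * 1024 * 1024 -> 10485760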
@@ -129,7 +138,7 @@ class Config(object):
 
     @staticmethod
     def parse_duration(value):
-        if isinstance(value, integer_types):
+        if isinstance(value, int):
             return value
         second = 1000
         minute = 60 * second
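
parse_duration gets the same one-line treatment and returns a number of milliseconds; as with parse_size, the suffix parsing is outside the hunk, so the string example is an assumption:

    Config.parse_duration(5000)  # ints are already milliseconds -> 5000
    Config.parse_duration("1m")  # assumed: 60 * 1000 -> 60000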
@@ -184,6 +193,95 @@ class Config(object):
         with open(file_path) as file_stream:
             return file_stream.read()
 
+    def read_templates(
+        self, filenames: List[str], custom_template_directory: Optional[str] = None,
+    ) -> List[jinja2.Template]:
+        """Load a list of template files from disk.
+
+        This function will attempt to load the given templates from the default Synapse
+        template directory. If `custom_template_directory` is supplied, that directory
+        is tried first.
+
+        Files read are treated as Jinja templates. These templates are not rendered yet.
+
+        Args:
+            filenames: A list of template filenames to read.
+
+            custom_template_directory: A directory to look for the templates in
+                before falling back to the default Synapse template directory.
+
+        Raises:
+            ConfigError: if the file's path is incorrect or otherwise cannot be read.
+
+        Returns:
+            A list of jinja2 templates.
+        """
+        templates = []
+        search_directories = [self.default_template_dir]
+
+        # The loader will first look in the custom template directory (if specified)
+        # for the given filename. If it doesn't find it, it will use the default
+        # template directory instead.
+        if custom_template_directory:
+            # Check that the given template directory exists
+            if not self.path_exists(custom_template_directory):
+                raise ConfigError(
+                    "Configured template directory does not exist: %s"
+                    % (custom_template_directory,)
+                )
+
+            # Search the custom template directory as well
+            search_directories.insert(0, custom_template_directory)
+
+        loader = jinja2.FileSystemLoader(search_directories)
+        env = jinja2.Environment(loader=loader, autoescape=True)
+
+        # Update the environment with our custom filters
+        env.filters.update(
+            {
+                "format_ts": _format_ts_filter,
+                "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
+            }
+        )
+
+        for filename in filenames:
+            # Load the template
+            template = env.get_template(filename)
+            templates.append(template)
+
+        return templates
+
+
+def _format_ts_filter(value: int, format: str):
+    return time.strftime(format, time.localtime(value / 1000))
+
+
+def _create_mxc_to_http_filter(public_baseurl: str) -> Callable:
+    """Create and return a jinja2 filter that converts MXC urls to HTTP
+
+    Args:
+        public_baseurl: The public, accessible base URL of the homeserver
+    """
+
+    def mxc_to_http_filter(value, width, height, resize_method="crop"):
+        if value[0:6] != "mxc://":
+            return ""
+
+        server_and_media_id = value[6:]
+        fragment = None
+        if "#" in server_and_media_id:
+            server_and_media_id, fragment = server_and_media_id.split("#", 1)
+            fragment = "#" + fragment
+
+        params = {"width": width, "height": height, "method": resize_method}
+        return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
+            public_baseurl,
+            server_and_media_id,
+            urllib.parse.urlencode(params),
+            fragment or "",
+        )
+
+    return mxc_to_http_filter
+
+
 class RootConfig(object):
     """
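
A hedged usage sketch for the new helper (the config object, template name, and URLs below are hypothetical): read_templates returns unrendered jinja2 templates with the custom filters already installed, and the filter output shown follows directly from the code above.

    # Load a template, trying a custom directory before the bundled one.
    (notice_template,) = config.read_templates(
        ["notice.html"], custom_template_directory="/etc/synapse/templates"
    )
    html = notice_template.render(user="@alice:example.com")

    # What the mxc_to_http filter produces, per the code above:
    to_http = _create_mxc_to_http_filter("https://matrix.example.com/")
    to_http("mxc://example.com/abc123", 32, 32)
    # -> "https://matrix.example.com/_matrix/media/v1/thumbnail/example.com/abc123
    #     ?width=32&height=32&method=crop"

Inside a template the filter would be applied as "{{ avatar_url | mxc_to_http(32, 32) }}", the usual jinja2 filter syntax.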
@@ -719,4 +817,36 @@ def find_config_files(search_paths):
     return config_files
 
 
-__all__ = ["Config", "RootConfig"]
+@attr.s
+class ShardedWorkerHandlingConfig:
+    """Algorithm for choosing which instance is responsible for handling some
+    sharded work.
+
+    For example, the federation senders use this to determine which instance
+    handles sending stuff to a given destination (which is used as the `key`
+    below).
+    """
+
+    instances = attr.ib(type=List[str])
+
+    def should_handle(self, instance_name: str, key: str) -> bool:
+        """Whether this instance is responsible for handling the given key.
+        """
+
+        # If multiple instances are not defined we always return true.
+        if not self.instances or len(self.instances) == 1:
+            return True
+
+        # We shard by taking the hash, modulo it by the number of instances and
+        # then checking whether this instance matches the instance at that
+        # index.
+        #
+        # (Technically this introduces some bias and is not entirely uniform,
+        # but since the hash is so large the bias is ridiculously small).
+        dest_hash = sha256(key.encode("utf8")).digest()
+        dest_int = int.from_bytes(dest_hash, byteorder="little")
+        remainder = dest_int % (len(self.instances))
+        return self.instances[remainder] == instance_name
+
+
+__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]
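
Because should_handle is a pure function of the instance list and the key, every worker computes the same assignment without any coordination. A quick check of the scheme (instance names hypothetical, and assuming the class above is importable):

    from hashlib import sha256

    instances = ["sender1", "sender2", "sender3"]
    config = ShardedWorkerHandlingConfig(instances=instances)

    key = "remote.example.com"
    dest_int = int.from_bytes(sha256(key.encode("utf8")).digest(), "little")
    owner = instances[dest_int % len(instances)]

    # Exactly one instance claims the key, and it is the hashed-to one:
    assert [config.should_handle(name, key) for name in instances].count(True) == 1
    assert config.should_handle(owner, key)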
@@ -137,3 +137,8 @@ class Config:
 
 def read_config_files(config_files: List[str]): ...
 def find_config_files(search_paths: List[str]): ...
+
+class ShardedWorkerHandlingConfig:
+    instances: List[str]
+    def __init__(self, instances: List[str]) -> None: ...
+    def should_handle(self, instance_name: str, key: str) -> bool: ...