
Compare commits


300 Commits

Author SHA1 Message Date
Richard van der Hoff
d6eae548a7 Merge branch 'release-v1.10.0' into matrix-org-hotfixes 2020-02-11 10:43:32 +00:00
Richard van der Hoff
e439438b9b Merge branch 'release-v1.10.0' into matrix-org-hotfixes 2020-02-10 09:56:51 +00:00
Richard van der Hoff
f8a1e0d1d2 Merge branch 'release-v1.10.0' into matrix-org-hotfixes 2020-02-10 09:54:40 +00:00
Erik Johnston
8a29def84a Add support for putting fed user query API on workers (#6873) 2020-02-07 15:59:05 +00:00
Erik Johnston
77a166577a Allow moving group read APIs to workers (#6866) 2020-02-07 13:57:07 +00:00
Erik Johnston
7d5268d37c Merge branch 'release-v1.10.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-02-06 10:26:39 +00:00
Erik Johnston
c854d255e5 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-31 15:06:16 +00:00
Brendan Abolivier
c660962d4d Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-22 13:48:11 +00:00
Richard van der Hoff
767bef0033 Merge branch 'rav/storage_provider_debug' into matrix-org-hotfixes 2020-01-21 23:03:22 +00:00
Richard van der Hoff
4d02bfd6e1 a bit of debugging for media storage providers 2020-01-21 23:02:58 +00:00
Andrew Morgan
a099ab7d38 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-14 10:37:32 +00:00
Erik Johnston
ce72a9ccdb Merge branch 'erikj/media_admin_apis' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-08 15:52:58 +00:00
Erik Johnston
bace86ed15 Merge branch 'release-v1.8.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-08 15:52:48 +00:00
Erik Johnston
45bf455948 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2020-01-07 14:24:36 +00:00
Richard van der Hoff
859663565c Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2020-01-06 15:43:41 +00:00
Richard van der Hoff
0876a5b641 Merge branch 'release-v1.7.3' into matrix-org-hotfixes 2019-12-31 10:47:29 +00:00
Richard van der Hoff
5b5314ee41 Merge branch 'release-v1.7.2' into matrix-org-hotfixes 2019-12-20 10:48:04 +00:00
Richard van der Hoff
aff9189149 Merge remote-tracking branch 'origin/release-v1.7.1' into matrix-org-hotfixes 2019-12-17 16:00:43 +00:00
Richard van der Hoff
2eda49a8db Merge remote-tracking branch 'origin/release-v1.7.1' into matrix-org-hotfixes 2019-12-17 10:56:36 +00:00
Richard van der Hoff
96b17d4e4f Merge remote-tracking branch 'origin/release-v1.7.0' into matrix-org-hotfixes 2019-12-17 10:56:26 +00:00
Erik Johnston
aadc131dc1 Merge branch 'babolivier/pusher-room-store' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-12-10 12:50:04 +00:00
Neil Johnson
0a522121a0 Merge branch 'release-v1.7.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-12-10 11:25:28 +00:00
Andrew Morgan
0b5e2c8093 Merge branch 'release-v1.6.1' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-28 11:40:33 +00:00
Erik Johnston
c665d154a2 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-26 18:56:54 +00:00
Neil Johnson
31295b5a60 Merge branch 'release-v1.6.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-26 13:16:18 +00:00
Erik Johnston
aebe20c452 Fix phone home stats (#6418)
Fix phone home stats
2019-11-26 13:10:09 +00:00
Andrew Morgan
508e0f9310 1.6.0 2019-11-26 12:15:46 +00:00
Andrew Morgan
e04e7e830e Merge branch 'release-v1.6.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-26 12:06:38 +00:00
Andrew Morgan
5407e69732 Change /push/v1/notify IP to 10.103.0.7 2019-11-26 12:04:19 +00:00
Erik Johnston
2c59eb368c Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-20 15:17:10 +00:00
Erik Johnston
6d1a3e2bdd Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-11-19 12:59:39 +00:00
Richard van der Hoff
7fa4586e36 Merge branch 'rav/url_preview_limit_title_2' into matrix-org-hotfixes 2019-11-05 18:18:02 +00:00
Erik Johnston
33b4aa8d99 Merge branch 'release-v1.5.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-29 12:18:44 +00:00
Erik Johnston
627cf5def8 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-25 11:35:14 +01:00
Erik Johnston
b409d51dee Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-25 10:19:09 +01:00
Erik Johnston
4a4e620f30 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-11 10:40:15 +01:00
Richard van der Hoff
28889d8da5 fix logging 2019-10-11 09:57:18 +01:00
Richard van der Hoff
15b2a50817 Add some randomness to the high-cpu backoff hack 2019-10-11 09:15:56 +01:00
Richard van der Hoff
b852a8247d Awful hackery to try to get the fed sender to keep up
Basically, if the federation sender starts getting behind, insert some sleeps
into the transaction transmission code to give the fed sender a chance to catch
up.

Might have to experiment a bit with the numbers.
2019-10-10 10:34:08 +01:00
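The hack above is simple backpressure. A minimal sketch of the idea, assuming a hypothetical coroutine-based sender (the names below are illustrative, not Synapse's actual transaction-transmission code):

```python
import asyncio

# Tunables -- the commit itself says the numbers need experimenting with.
LAG_THRESHOLD = 100   # hypothetical: queued transactions before throttling
SLEEP_SECONDS = 0.5   # hypothetical: pause before each send while behind


async def send_with_backpressure(queue_depth: int, send_transaction) -> None:
    """Send one federation transaction, sleeping first if the sender is behind."""
    if queue_depth > LAG_THRESHOLD:
        # Slow down transmission so the federation sender can catch up.
        await asyncio.sleep(SLEEP_SECONDS)
    await send_transaction()
```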
Erik Johnston
7b55cca011 Merge branch 'erikj/cache_memberships' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-07 13:15:22 +01:00
Richard van der Hoff
a9577ab1f4 Merge branch 'develop' into matrix-org-hotfixes 2019-10-03 17:52:22 +01:00
Richard van der Hoff
cb217d5d60 Revert "Awful hackery to try to get the fed sender to keep up"
This reverts commit 721086a291.

This didn't help.
2019-10-03 17:05:24 +01:00
Andrew Morgan
f4f5355bcf Merge branch 'release-v1.4.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-03 13:06:32 +01:00
Erik Johnston
23bb2713d2 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-02 16:51:08 +01:00
Erik Johnston
b2471e1109 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-02 15:39:31 +01:00
Erik Johnston
610219d53d Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-02 14:09:29 +01:00
Erik Johnston
b464afe283 Merge branch 'release-v1.4.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-10-02 11:09:05 +01:00
Richard van der Hoff
7657ad3ced Merge branch 'rav/federation_sender_hackery' into matrix-org-hotfixes 2019-09-27 16:14:52 +01:00
Richard van der Hoff
721086a291 Awful hackery to try to get the fed sender to keep up
Basically, if the federation sender starts getting behind, insert some sleeps
into the transaction transmission code to give the fed sender a chance to catch
up.

Might have to experiment a bit with the numbers.
2019-09-27 16:13:51 +01:00
Richard van der Hoff
6e6b53ed3a Merge branch 'develop' into matrix-org-hotfixes 2019-09-26 15:22:33 +01:00
Richard van der Hoff
601b50672d Merge branch 'develop' into matrix-org-hotfixes 2019-09-25 12:48:40 +01:00
Richard van der Hoff
a7af389da0 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-09-24 17:05:15 +01:00
Neil Johnson
99db0d76fd Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-24 14:56:12 +01:00
Richard van der Hoff
561b0f79bc Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-09-24 10:11:19 +01:00
Richard van der Hoff
8569f3cdef Merge branch 'rav/fix_retry_reset' into matrix-org-hotfixes 2019-09-20 12:14:19 +01:00
Richard van der Hoff
7b61e6f5d6 Merge branch 'develop' into matrix-org-hotfixes 2019-09-18 13:55:25 +01:00
Richard van der Hoff
05241b3031 Revert "Fix m.federate bug"
This has now been merged into develop (142c9325c) so we no longer need this
cherry-picked commit.

This reverts commit ee91c69ef7.
2019-09-18 13:54:57 +01:00
Richard van der Hoff
e01026d84d Revert "Fix existing v2 identity server calls (MSC2140) (#6013)"
This has now been merged into develop (3505ffcda) so we don't need this
cherry-picked commit.

This reverts commit e0eef47315.
2019-09-18 13:53:37 +01:00
Erik Johnston
ee91c69ef7 Fix m.federate bug 2019-09-13 14:44:48 +01:00
Andrew Morgan
e0eef47315 Fix existing v2 identity server calls (MSC2140) (#6013)
Two things I missed while implementing [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140/files#diff-c03a26de5ac40fb532de19cb7fc2aaf7R80).

1. Access tokens should be provided to the identity server as `access_token`, not `id_access_token`, even though the homeserver may accept the tokens as `id_access_token`.
2. Access tokens must be sent to the identity server in a query parameter, the JSON body is not allowed.

We now send the access token as part of an `Authorization: ...` header, which fixes both things.

The breaking code was added in https://github.com/matrix-org/synapse/pull/5892

Sytest PR: https://github.com/matrix-org/sytest/pull/697
2019-09-13 14:08:26 +01:00
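For illustration only, a hedged sketch of the fixed request shape using `requests` (not Synapse's actual client code; the header form follows the MSC2140 description above):

```python
import requests


def query_identity_server(id_server: str, path: str, access_token: str) -> dict:
    # Per the fix above: the token is sent in an Authorization header,
    # rather than as an id_access_token field in the JSON body (which the
    # identity server does not accept).
    resp = requests.get(
        f"https://{id_server}{path}",
        headers={"Authorization": f"Bearer {access_token}"},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()
```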
Erik Johnston
44d2ca2990 Merge branch 'anoa/fix_3pid_validation' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-10 18:15:24 +01:00
Erik Johnston
9240622c1a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-06 14:10:53 +01:00
Erik Johnston
0dbba85e95 Merge branch 'anoa/worker_store_reg' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-06 13:02:12 +01:00
Andrew Morgan
1ceeccb769 Move get_threepid_validation_session into RegistrationWorkerStore 2019-09-06 13:00:34 +01:00
Erik Johnston
39883e85bd Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-06 12:50:28 +01:00
Erik Johnston
68f53b7a0e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-06 09:53:37 +01:00
Erik Johnston
e679b008ff Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-05 15:23:40 +01:00
Erik Johnston
e80a5b7492 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-09-04 13:13:30 +01:00
Richard van der Hoff
b272e7345f Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-08-30 12:01:24 +01:00
Erik Johnston
a81e0233e9 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-08-29 11:18:57 +01:00
Richard van der Hoff
80898481ab Merge branch 'release-v1.3.1' into matrix-org-hotfixes 2019-08-17 09:22:30 +01:00
Brendan Abolivier
9d4c716d85 Merge branch 'release-v1.3.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-08-15 11:36:00 +01:00
Brendan Abolivier
d90b0946ed Temporary fix to ensure kde can contact matrix.org if stuff breaks 2019-08-13 18:05:06 +01:00
Brendan Abolivier
8d5762b0dc Merge branch 'release-v1.3.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-08-13 17:39:30 +01:00
Brendan Abolivier
a7efbc5416 Merge branch 'release-v1.3.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-08-13 15:54:01 +01:00
Richard van der Hoff
be362cb8f8 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-08-13 10:52:19 +01:00
Erik Johnston
873ff9522b Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-08-01 14:46:09 +01:00
Erik Johnston
c1ee2999a0 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-31 10:01:56 +01:00
Erik Johnston
9b2b386f76 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-30 13:26:19 +01:00
Erik Johnston
65fe31786d Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-30 10:12:13 +01:00
Andrew Morgan
70b6d1dfd6 Merge branch 'release-v1.2.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-24 13:32:41 +01:00
Erik Johnston
ee62aed72e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-23 10:23:40 +01:00
Erik Johnston
c02f26319d Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-23 09:20:26 +01:00
Andrew Morgan
fdd182870c Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-22 10:19:16 +01:00
Richard van der Hoff
4102cb220a Merge branch 'release-v1.2.0' into matrix-org-hotfixes 2019-07-18 15:20:00 +01:00
Erik Johnston
5299707329 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-07-17 10:56:55 +01:00
Richard van der Hoff
43e01be158 Merge remote-tracking branch 'origin/release-v1.1.0' into matrix-org-hotfixes 2019-07-03 09:49:35 +01:00
Richard van der Hoff
589e080c6b Merge branch 'release-v1.1.0' into matrix-org-hotfixes 2019-07-03 09:47:55 +01:00
Richard van der Hoff
24e48bc9ff Merge branch 'release-v1.1.0' into matrix-org-hotfixes 2019-07-02 12:05:33 +01:00
Erik Johnston
576b62a6a3 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-28 10:04:54 +01:00
Erik Johnston
ad2ba70959 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-24 15:31:36 +01:00
Erik Johnston
a330505025 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-21 14:36:13 +01:00
Erik Johnston
67b73fd147 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-21 13:27:04 +01:00
Erik Johnston
c08e4dbadc Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-17 14:10:28 +01:00
Erik Johnston
6dbd498772 Merge branch 'master' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-11 17:25:54 +01:00
Erik Johnston
03b09b32d6 Merge branch 'release-v1.0.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-11 14:00:50 +01:00
Erik Johnston
8f1711da0e Merge branch 'release-v1.0.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-11 00:23:54 +01:00
Erik Johnston
6fb6c98f71 Merge branch 'release-v1.0.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-10 18:34:45 +01:00
Erik Johnston
aad993f24d Merge branch 'release-v1.0.0' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-10 16:05:10 +01:00
Erik Johnston
544e101c24 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-06-04 16:58:38 +01:00
Richard van der Hoff
8699f380f0 hotfix RetryLimiter 2019-06-04 12:14:41 +01:00
Richard van der Hoff
e91a68ef3a Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-06-04 11:59:55 +01:00
Richard van der Hoff
9f5048c198 Merge branch 'rav/limit_displayname_length' into matrix-org-hotfixes 2019-06-01 11:15:43 +01:00
Erik Johnston
b3c40ba58a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-05-31 10:58:47 +01:00
Erik Johnston
8d69193a42 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-05-30 14:33:44 +01:00
Erik Johnston
bbcd19f2d0 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-05-24 10:53:01 +01:00
Erik Johnston
3cd598135f Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-05-23 15:54:13 +01:00
Richard van der Hoff
1c8f2c34ff Merge branch 'develop' into matrix-org-hotfixes 2019-05-21 16:29:25 +01:00
Richard van der Hoff
ca03f90ee7 Merge branch 'develop' into matrix-org-hotfixes 2019-05-20 15:55:39 +01:00
Richard van der Hoff
9feee29d76 Merge tag 'v0.99.4rc1' into matrix-org-hotfixes
v0.99.4rc1
2019-05-14 11:12:22 +01:00
Richard van der Hoff
e7dcee13da Merge commit 'a845abbf3' into matrix-org-hotfixes 2019-05-03 17:12:28 +01:00
Richard van der Hoff
7467738834 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2019-05-02 13:37:35 +01:00
Erik Johnston
d75fb8ae22 Merge branch 'erikj/ratelimit_3pid_invite' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-04-26 18:12:33 +01:00
Erik Johnston
ae25a8efef Merge branch 'erikj/postpath' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-04-17 10:14:57 +01:00
Richard van der Hoff
fc5be50d56 skip send without trailing slash 2019-04-16 15:16:57 +01:00
Erik Johnston
aadba440da Point pusher to new box 2019-04-15 19:23:21 +01:00
Erik Johnston
ec94d6a590 VersionRestServlet doesn't take a param 2019-04-15 19:21:32 +01:00
Erik Johnston
42ce90c3f7 Merge branch 'erikj/move_endpoints' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-04-15 18:56:46 +01:00
Erik Johnston
8467756dc1 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-04-04 14:43:57 +01:00
Erik Johnston
613b443ff0 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-04-02 18:25:45 +01:00
Richard van der Hoff
233b61ac61 Remove spurious changelog files from hotfixes
The relevant patches are now in develop thanks to
https://github.com/matrix-org/synapse/pull/4816.
2019-04-02 13:51:37 +01:00
Richard van der Hoff
f41c9d37d6 Merge branch 'develop' into matrix-org-hotfixes 2019-04-02 13:47:08 +01:00
Neil Johnson
1048e2ca6a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-27 09:18:35 +00:00
Richard van der Hoff
ce0ce1add3 Merge branch 'develop' into matrix-org-hotfixes 2019-03-25 16:48:56 +00:00
Erik Johnston
b0bf1ea7bd Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-21 14:10:31 +00:00
Richard van der Hoff
2561b628af Merge branch 'develop' into matrix-org-hotfixes 2019-03-19 12:19:20 +00:00
Richard van der Hoff
73c6630718 Revert "Reinstate EDU-batching hacks"
This reverts commit ed8ccc3737.
2019-03-19 12:17:28 +00:00
Erik Johnston
a189bb03ab Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-14 14:39:06 +00:00
Erik Johnston
404a2d70be Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-14 13:55:29 +00:00
Richard van der Hoff
ed8ccc3737 Reinstate EDU-batching hacks
This reverts commit c7285607a3.
2019-03-13 14:42:11 +00:00
Erik Johnston
18b1a92162 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-08 09:56:17 +00:00
Amber Brown
199aa72d35 Merge branch 'develop' of ssh://github.com/matrix-org/synapse into matrix-org-hotfixes 2019-03-07 21:43:10 +11:00
Erik Johnston
8f7dbbc14a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-06 19:30:30 +00:00
Erik Johnston
27dbc9ac42 Reenable presence tests and remove pointless change 2019-03-06 17:12:45 +00:00
Richard van der Hoff
e9aa401994 Remove redundant changes from synapse/replication/tcp/streams.py (#4813)
This was some hacky code (introduced in c10c71e70d) to make the presence stream
do nothing on hotfixes. We now ensure that no replication clients subscribe to
the presence stream, so this is redundant.
2019-03-06 13:21:32 +00:00
Richard van der Hoff
9e9572c79e Run black on synapse/handlers/user_directory.py (#4812)
This got done on the develop branch in #4635, but the subsequent merge to
hotfixes (88af0317a) discarded the changes for some reason.

Fixing this here and now means (a) there are fewer differences between
matrix-org-hotfixes and develop, making future patches easier to merge, and (b)
fixes some pep8 errors on the hotfixes branch which have been annoying me for
some time.
2019-03-06 11:56:03 +00:00
Richard van der Hoff
c7285607a3 Revert EDU-batching hacks from matrix-org-hotfixes
Firstly: we want to do this in a better way, which is the intention of
too many RRs, which means we need to make it happen again.

This reverts commits: 8d7c0264b 000d23090 eb0334b07 4d07dc0d1
2019-03-06 11:04:53 +00:00
Erik Johnston
a6e2546980 Fix outbound federation 2019-03-05 14:50:37 +00:00
Erik Johnston
dc510e0e43 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-03-05 14:41:13 +00:00
Richard van der Hoff
ed12338f35 Remove #4733 debug (#4767)
We don't need any of this stuff now; this brings protocol.py back into line
with develop for the hotfixes branch.
2019-03-04 14:00:03 +00:00
Richard van der Hoff
bf3f8b8855 Add more debug for #4422 (#4769) 2019-02-28 17:46:22 +00:00
Richard van der Hoff
67acd1aa1b Merge branch 'develop' into matrix-org-hotfixes 2019-02-27 10:29:24 +00:00
Erik Johnston
75c924430e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-26 09:36:29 +00:00
Richard van der Hoff
6087c53830 Add more debug for membership syncing issues (#4719) 2019-02-25 17:00:18 +00:00
Erik Johnston
b50fe65a22 Add logging when sending error 2019-02-25 15:55:21 +00:00
Erik Johnston
17009e689b Merge pull request #4734 from matrix-org/rav/repl_debug
Add some debug to help with #4733
2019-02-25 15:52:45 +00:00
Richard van der Hoff
5d2f755d3f Add some debug to help with #4733 2019-02-25 14:37:23 +00:00
Richard van der Hoff
8d7c0264bc more fix edu batching hackery 2019-02-24 23:27:52 +00:00
Richard van der Hoff
000d230901 fix edu batching hackery 2019-02-24 23:19:37 +00:00
Richard van der Hoff
eb0334b07c more edu batching hackery 2019-02-24 23:15:09 +00:00
Richard van der Hoff
4d07dc0d18 Add a delay to the federation loop for EDUs 2019-02-24 22:24:36 +00:00
Erik Johnston
0ea52872ab Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-22 15:29:41 +00:00
Richard van der Hoff
6868d53fe9 bail out early in on_new_receipts if no pushers 2019-02-21 15:58:15 +00:00
Richard van der Hoff
68af15637b Merge branch 'develop' into matrix-org-hotfixes 2019-02-20 14:24:17 +00:00
Richard van der Hoff
4da63d9f6f Merge branch 'develop' into matrix-org-hotfixes 2019-02-20 14:15:56 +00:00
Richard van der Hoff
085d69b0bd Apply the pusher http hack in the right place (#4692)
Do it in the constructor, so that it works for badge updates as well as pushes
2019-02-20 11:25:10 +00:00
Erik Johnston
776fe6c184 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-20 09:52:24 +00:00
Erik Johnston
0e07d2c7d5 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-19 13:24:37 +00:00
Erik Johnston
90ec885805 Revert "Merge pull request #4654 from matrix-org/hawkowl/registration-worker"
This reverts commit 5bd2e2c31d, reversing
changes made to d97c3a6ce6.
2019-02-19 13:23:17 +00:00
Erik Johnston
5a28154c4d Revert "Merge pull request #4655 from matrix-org/hawkowl/registration-worker"
This reverts commit 93555af5c9, reversing
changes made to 5bd2e2c31d.
2019-02-19 13:23:14 +00:00
Erik Johnston
2fcb51e703 Merge branch 'matthew/well-known-cors' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-18 18:38:49 +00:00
Erik Johnston
26f524872f Revert change that cached connection factory 2019-02-18 18:36:54 +00:00
Erik Johnston
88af0317a2 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-15 22:39:13 +00:00
Erik Johnston
c10c71e70d Emergency changes 2019-02-15 18:15:21 +00:00
Erik Johnston
93555af5c9 Merge pull request #4655 from matrix-org/hawkowl/registration-worker
Device replication
2019-02-15 18:12:49 +00:00
Amber Brown
06622e4110 fix 2019-02-16 05:11:09 +11:00
Amber Brown
155efa9e36 fix 2019-02-16 05:10:48 +11:00
Amber Brown
3175edc5d8 maybe 2019-02-16 05:09:08 +11:00
Amber Brown
d95252c01f use a device replication thingy 2019-02-16 05:08:58 +11:00
Erik Johnston
5bd2e2c31d Merge pull request #4654 from matrix-org/hawkowl/registration-worker
Registration worker
2019-02-15 17:51:34 +00:00
Amber Brown
84528e4fb2 cleanup 2019-02-16 04:49:09 +11:00
Amber Brown
e4381ed514 pep8 2019-02-16 04:42:04 +11:00
Amber Brown
d9235b9e29 fix appservice, add to frontend proxy 2019-02-16 04:39:49 +11:00
Amber Brown
ce5f3b1ba5 add all the files 2019-02-16 04:35:58 +11:00
Amber Brown
7b5c04312e isort 2019-02-16 04:35:27 +11:00
Amber Brown
f5bafd70f4 add cache remover endpoint and wire it up 2019-02-16 04:34:23 +11:00
Richard van der Hoff
d97c3a6ce6 Merge remote-tracking branch 'origin/release-v0.99.1' into matrix-org-hotfixes 2019-02-13 14:29:05 +00:00
Erik Johnston
341c35614a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-02-13 10:29:31 +00:00
Richard van der Hoff
fecf28319c Merge branch 'release-v0.99.0' into matrix-org-hotfixes 2019-02-01 13:29:31 +00:00
Richard van der Hoff
345d8cfb69 Merge branch 'release-v0.99.0' into matrix-org-hotfixes 2019-02-01 13:21:42 +00:00
Richard van der Hoff
b60d005156 Merge branch 'develop' into matrix-org-hotfixes 2019-01-31 18:44:04 +00:00
Richard van der Hoff
6c232a69df Revert "Break infinite loop on redaction in v3 rooms"
We've got a better fix of this now.

This reverts commit decb5698b3.
2019-01-31 18:43:49 +00:00
Amber Brown
e97c1df30c remove slow code on userdir (#4534) 2019-01-31 13:26:38 +00:00
Richard van der Hoff
decb5698b3 Break infinite loop on redaction in v3 rooms 2019-01-31 00:23:58 +00:00
Erik Johnston
62962e30e4 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-30 17:04:08 +00:00
Erik Johnston
05413d4e20 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-30 14:27:19 +00:00
Erik Johnston
ca46dcf683 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-30 13:11:25 +00:00
Erik Johnston
d351be1567 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-30 11:48:29 +00:00
Andrew Morgan
c7f2eaf4f4 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-29 10:07:13 +00:00
Andrew Morgan
53d25116df Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-25 14:33:14 +00:00
Andrew Morgan
08e25ffa0c Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-24 15:51:59 +00:00
Andrew Morgan
1c148e442b Merge branch 'anoa/room_dir_quick_fix' into matrix-org-hotfixes 2019-01-24 15:37:16 +00:00
Andrew Morgan
acaca1b4e9 Merge branch 'anoa/room_dir_quick_fix' into matrix-org-hotfixes 2019-01-24 14:51:35 +00:00
Andrew Morgan
4777836b83 Fix missing synapse metrics import 2019-01-23 15:26:03 +00:00
Andrew Morgan
7da659dd6d Use existing stream position counter metric 2019-01-23 15:04:12 +00:00
Andrew Morgan
77dfe51aba Name metric consistently 2019-01-23 15:04:05 +00:00
Andrew Morgan
ef7865e2f2 Track user_dir current event stream position 2019-01-23 15:03:54 +00:00
Matthew Hodgson
5cb15c0443 warn if we ignore device lists 2019-01-15 22:11:46 +00:00
Matthew Hodgson
b43172ffbc Merge pull request #4396 from matrix-org/matthew/bodge_device_update_dos
limit remote device lists to 10000 entries per user
2019-01-15 21:47:00 +00:00
Matthew Hodgson
b4796d1814 drop the limit to 1K as e2e will be hosed beyond that point anyway 2019-01-15 21:46:29 +00:00
Matthew Hodgson
482d06774a don't store remote device lists if they have more than 10K devices 2019-01-15 21:38:07 +00:00
Matthew Hodgson
046d731fbd limit remote device lists to 1000 entries per user 2019-01-15 21:07:12 +00:00
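A guard of roughly this shape (names hypothetical, not Synapse's storage API) captures what the three commits above do:

```python
import logging

logger = logging.getLogger(__name__)

MAX_DEVICES_PER_USER = 1000  # dropped from 10K: E2E is hosed beyond ~1K anyway


def maybe_store_remote_device_list(user_id: str, devices: list) -> bool:
    """Store a remote user's device list unless it is implausibly large."""
    if len(devices) > MAX_DEVICES_PER_USER:
        # Warn when we ignore a device list (see 5cb15c0443 above).
        logger.warning(
            "ignoring device list for %s: %d devices", user_id, len(devices)
        )
        return False
    # ... hand the list to the device store here ...
    return True
```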
Richard van der Hoff
892f6c98ec Merge tag 'v0.34.1.1' into matrix-org-hotfixes
Synapse 0.34.1.1 (2019-01-11)
=============================

This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.1.

This release is compatible with Python 2.7 and 3.5+. Python 3.7 is fully supported.

Bugfixes
--------

- Fix spontaneous logout on upgrade
  ([\#4374](https://github.com/matrix-org/synapse/issues/4374))
2019-01-11 10:21:18 +00:00
Erik Johnston
7fafa2d954 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2019-01-09 09:13:16 +00:00
Richard van der Hoff
1d63046542 Merge tag 'v0.34.1rc1' into matrix-org-hotfixes
Synapse 0.34.1rc1 (2019-01-08)
==============================

Features
--------

- Special-case a support user for use in verifying behaviour of a given server. The support user does not appear in user directory or monthly active user counts. ([\#4141](https://github.com/matrix-org/synapse/issues/4141), [\#4344](https://github.com/matrix-org/synapse/issues/4344))
- Support for serving .well-known files ([\#4262](https://github.com/matrix-org/synapse/issues/4262))
- Rework SAML2 authentication ([\#4265](https://github.com/matrix-org/synapse/issues/4265), [\#4267](https://github.com/matrix-org/synapse/issues/4267))
- SAML2 authentication: Initialise user display name from SAML2 data ([\#4272](https://github.com/matrix-org/synapse/issues/4272))
- Synapse can now have its conditional/extra dependencies installed by pip. This functionality can be used by using `pip install matrix-synapse[feature]`, where feature is a comma separated list with the possible values `email.enable_notifs`, `matrix-synapse-ldap3`, `postgres`, `resources.consent`, `saml2`, `url_preview`, and `test`. If you want to install all optional dependencies, you can use "all" instead. ([\#4298](https://github.com/matrix-org/synapse/issues/4298), [\#4325](https://github.com/matrix-org/synapse/issues/4325), [\#4327](https://github.com/matrix-org/synapse/issues/4327))
- Add routes for reading account data. ([\#4303](https://github.com/matrix-org/synapse/issues/4303))
- Add opt-in support for v2 rooms ([\#4307](https://github.com/matrix-org/synapse/issues/4307))
- Add a script to generate a clean config file ([\#4315](https://github.com/matrix-org/synapse/issues/4315))
- Return server data in /login response ([\#4319](https://github.com/matrix-org/synapse/issues/4319))

Bugfixes
--------

- Fix contains_url check to be consistent with other instances in code-base and check that value is an instance of string. ([\#3405](https://github.com/matrix-org/synapse/issues/3405))
- Fix CAS login when username is not valid in an MXID ([\#4264](https://github.com/matrix-org/synapse/issues/4264))
- Send CORS headers for /media/config ([\#4279](https://github.com/matrix-org/synapse/issues/4279))
- Add 'sandbox' to CSP for media repository ([\#4284](https://github.com/matrix-org/synapse/issues/4284))
- Make the new landing page prettier. ([\#4294](https://github.com/matrix-org/synapse/issues/4294))
- Fix deleting E2E room keys when using old SQLite versions. ([\#4295](https://github.com/matrix-org/synapse/issues/4295))
- The metric synapse_admin_mau:current previously did not update when config.mau_stats_only was set to True ([\#4305](https://github.com/matrix-org/synapse/issues/4305))
- Fixed per-room account data filters ([\#4309](https://github.com/matrix-org/synapse/issues/4309))
- Fix indentation in default config ([\#4313](https://github.com/matrix-org/synapse/issues/4313))
- Fix synapse:latest docker upload ([\#4316](https://github.com/matrix-org/synapse/issues/4316))
- Fix test_metric.py compatibility with prometheus_client 0.5. Contributed by Maarten de Vries <maarten@de-vri.es>. ([\#4317](https://github.com/matrix-org/synapse/issues/4317))
- Avoid packaging _trial_temp directory in -py3 debian packages ([\#4326](https://github.com/matrix-org/synapse/issues/4326))
- Check jinja version for consent resource ([\#4327](https://github.com/matrix-org/synapse/issues/4327))
- fix NPE in /messages by checking if all events were filtered out ([\#4330](https://github.com/matrix-org/synapse/issues/4330))
- Fix `python -m synapse.config` on Python 3. ([\#4356](https://github.com/matrix-org/synapse/issues/4356))

Deprecations and Removals
-------------------------

- Remove the deprecated v1/register API on Python 2. It was never ported to Python 3. ([\#4334](https://github.com/matrix-org/synapse/issues/4334))

Internal Changes
----------------

- Getting URL previews of IP addresses no longer fails on Python 3. ([\#4215](https://github.com/matrix-org/synapse/issues/4215))
- drop undocumented dependency on dateutil ([\#4266](https://github.com/matrix-org/synapse/issues/4266))
- Update the example systemd config to use a virtualenv ([\#4273](https://github.com/matrix-org/synapse/issues/4273))
- Update link to kernel DCO guide ([\#4274](https://github.com/matrix-org/synapse/issues/4274))
- Make isort tox check print diff when it fails ([\#4283](https://github.com/matrix-org/synapse/issues/4283))
- Log room_id in Unknown room errors ([\#4297](https://github.com/matrix-org/synapse/issues/4297))
- Documentation improvements for coturn setup. Contributed by Krithin Sitaram. ([\#4333](https://github.com/matrix-org/synapse/issues/4333))
- Update pull request template to use absolute links ([\#4341](https://github.com/matrix-org/synapse/issues/4341))
- Update README to not lie about required restart when updating TLS certificates ([\#4343](https://github.com/matrix-org/synapse/issues/4343))
- Update debian packaging for compatibility with transitional package ([\#4349](https://github.com/matrix-org/synapse/issues/4349))
- Fix command hint to generate a config file when trying to start without a config file ([\#4353](https://github.com/matrix-org/synapse/issues/4353))
- Add better logging for unexpected errors while sending transactions ([\#4358](https://github.com/matrix-org/synapse/issues/4358))
2019-01-08 11:37:25 +00:00
Richard van der Hoff
4c238a9a91 Merge remote-tracking branch 'origin/release-v0.34.0' into matrix-org-hotfixes 2018-12-19 10:24:26 +00:00
Richard van der Hoff
002db39a36 Merge tag 'v0.34.0rc1' into matrix-org-hotfixes 2018-12-04 14:07:28 +00:00
Richard van der Hoff
c4074e4ab6 Revert "Merge branch 'rav/timestamp_patch' into matrix-org-hotfixes"
This reverts commit 7960e814e5, reversing
changes made to 3dd704ee9a.

We no longer need this; please redo it as a proper MSC & synapse PR if you want
to keep it...
2018-12-03 10:15:39 +00:00
Richard van der Hoff
7960e814e5 Merge branch 'rav/timestamp_patch' into matrix-org-hotfixes 2018-11-30 12:10:30 +00:00
Richard van der Hoff
080025e533 Fix buglet and remove thread_id stuff 2018-11-30 12:09:33 +00:00
Richard van der Hoff
9accd63a38 Initial patch from Erik 2018-11-30 12:04:38 +00:00
Richard van der Hoff
3dd704ee9a Merge branch 'develop' into matrix-org-hotfixes 2018-11-20 11:29:45 +00:00
Richard van der Hoff
28e28a1974 Merge branch 'develop' into matrix-org-hotfixes 2018-11-20 11:03:35 +00:00
Richard van der Hoff
b699178aa1 Merge branch 'develop' into matrix-org-hotfixes 2018-11-14 11:54:29 +00:00
Richard van der Hoff
c08c649fa1 Merge remote-tracking branch 'origin/erikj/fix_device_comparison' into matrix-org-hotfixes 2018-11-08 12:48:19 +00:00
hera
5c0c4b4079 Fix encoding error for consent form on python3
The form was rendering this as "b'01234....'".

-- richvdh
2018-11-08 11:03:39 +00:00
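The Python 3 pitfall behind this fix is easy to reproduce: interpolating a `bytes` value into a `str` renders its `repr()`. A minimal demonstration (the HMAC value is made up):

```python
userhmac = b"0123abcd"  # hypothetical consent-form HMAC, held as bytes

# The bug: %-formatting bytes into str produces the repr, "b'...'".
print("consent?u=user&h=%s" % userhmac)                   # ...&h=b'0123abcd'

# The fix: decode to str before rendering into the form.
print("consent?u=user&h=%s" % userhmac.decode("ascii"))   # ...&h=0123abcd
```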
Richard van der Hoff
b55cdfaa31 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-11-08 10:47:56 +00:00
Richard van der Hoff
34406cf22c Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-11-06 10:49:20 +00:00
Amber Brown
f91aefd245 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-31 04:41:03 +11:00
Erik Johnston
f8281f42c8 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-10-29 18:16:58 +00:00
Amber Brown
7171bdf279 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-29 23:14:47 +11:00
Erik Johnston
9f2d14ee26 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-10-26 09:52:23 +01:00
Amber Brown
ead471e72d Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-22 22:18:02 +11:00
Richard van der Hoff
9a4011de46 Merge branch 'develop' into matrix-org-hotfixes 2018-10-18 16:37:01 +01:00
Amber Brown
33551be61b Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-15 20:15:27 +11:00
Richard van der Hoff
eeb29d99fd Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-09 09:49:08 +01:00
Richard van der Hoff
1a0c407e6b Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-10-09 09:47:37 +01:00
Erik Johnston
c4b37cbf18 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-10-02 16:44:57 +01:00
Erik Johnston
7fa156af80 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-10-02 14:39:30 +01:00
Richard van der Hoff
78825f4f1c Merge branch 'develop' into matrix-org-hotfixes 2018-09-26 13:27:33 +01:00
Richard van der Hoff
6e15b5debe Revert "Actually set cache factors in workers"
This reverts commit e21c312e16.
2018-09-26 13:25:52 +01:00
Matthew Hodgson
2e0d2879d0 Merge branch 'develop' into matrix-org-hotfixes 2018-09-26 11:00:26 +01:00
Michael Kaye
128043072b Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-24 11:20:10 +01:00
Erik Johnston
b2fda9d20e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-20 11:00:14 +01:00
Erik Johnston
3c8c5eabc2 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-15 11:40:37 +01:00
Erik Johnston
2da2041e2e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-14 19:56:33 +01:00
Erik Johnston
b5eef203f4 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-14 18:25:55 +01:00
Erik Johnston
df73da691f Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-13 16:15:56 +01:00
Matthew Hodgson
30d054e0bb Merge branch 'develop' into matrix-org-hotfixes 2018-09-12 17:16:21 +01:00
Erik Johnston
ebb3cc4ab6 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-12 11:22:06 +01:00
Erik Johnston
17201abd53 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-09-11 14:17:33 +01:00
Erik Johnston
2f141f4c41 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-22 11:47:08 +01:00
Richard van der Hoff
638c0bf49b Merge branch 'rav/fix_gdpr_consent' into matrix-org-hotfixes 2018-08-21 22:54:35 +01:00
hera
d1065e6f51 Merge tag 'v0.33.3rc2' into matrix-org-hotfixes
Bugfixes
--------

- Fix bug in v0.33.3rc1 which caused infinite loops and OOMs
([\#3723](https://github.com/matrix-org/synapse/issues/3723))
2018-08-21 19:12:14 +00:00
Erik Johnston
567863127a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-20 13:34:47 +01:00
Erik Johnston
f5abc10724 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-20 11:12:18 +01:00
Erik Johnston
bb795b56da Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-16 15:51:16 +01:00
Erik Johnston
4dd0604f61 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-15 15:37:05 +01:00
Richard van der Hoff
c05d278ba0 Merge branch 'rav/federation_metrics' into matrix-org-hotfixes 2018-08-07 19:11:29 +01:00
Erik Johnston
49a3163958 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-06 13:33:54 +01:00
Erik Johnston
1a568041fa Merge branch 'release-v0.33.1' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-08-02 15:28:32 +01:00
Erik Johnston
c9db8b0c32 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-24 17:22:23 +01:00
Erik Johnston
aa1bf10b91 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-24 15:49:38 +01:00
Erik Johnston
5222907bea Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-23 17:54:41 +01:00
Erik Johnston
e1eb147f2a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-23 16:45:22 +01:00
hera
e43eb47c5f Fixup limiter 2018-07-23 15:22:47 +00:00
hera
27eb4c45cd Lower hacky timeout for member limiter 2018-07-23 15:16:36 +00:00
Erik Johnston
b136d7ff8f Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-23 16:09:40 +01:00
Richard van der Hoff
9e56e1ab30 Merge branch 'develop' into matrix-org-hotfixes 2018-07-19 16:40:28 +01:00
Erik Johnston
742f757337 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-19 10:26:13 +01:00
Richard van der Hoff
2f5dfe299c Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-07-17 15:26:47 +01:00
Erik Johnston
e4eec87c6a Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-17 11:18:39 +01:00
Erik Johnston
f793ff4571 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-17 10:04:33 +01:00
Richard van der Hoff
195aae2f16 Merge branch 'develop' into matrix-org-hotfixes 2018-07-12 12:09:25 +01:00
Erik Johnston
7c79f2cb72 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-12 09:59:58 +01:00
Richard van der Hoff
f04e35c170 Merge branch 'develop' into matrix-org-hotfixes 2018-07-10 18:04:03 +01:00
Matthew Hodgson
36bbac05bd Merge branch 'develop' of git+ssh://github.com/matrix-org/synapse into matrix-org-hotfixes 2018-07-06 19:21:09 +01:00
Erik Johnston
e2a4b7681e Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-07-05 10:29:32 +01:00
Erik Johnston
957944eee4 Merge pull request #3476 from matrix-org/erikj/timeout_memberships
Timeout membership requests after 90s
2018-07-03 10:18:39 +01:00
Erik Johnston
bf425e533e Fix PEP8 2018-07-03 10:11:09 +01:00
Erik Johnston
ca21957b8a Timeout membership requests after 90s
This is a hacky fix to try and stop in flight requests from building up
2018-07-02 13:56:08 +01:00
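As a rough sketch of the technique (assuming an asyncio-style coroutine; the real handler is Twisted-based and named differently):

```python
import asyncio

MEMBERSHIP_TIMEOUT = 90  # seconds, per the commit above


async def update_membership_with_timeout(update_membership, *args, **kwargs):
    # Hard-cap how long a membership request may stay in flight, so stuck
    # requests fail fast instead of accumulating on the server.
    return await asyncio.wait_for(
        update_membership(*args, **kwargs), timeout=MEMBERSHIP_TIMEOUT
    )
```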
Erik Johnston
6a95270671 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-06-29 14:10:29 +01:00
hera
82781f5838 Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes 2018-06-28 21:09:28 +00:00
Matthew Hodgson
aae6d3ff69 Merge remote-tracking branch 'origin/revert-3451-hawkowl/sorteddict-api' into matrix-org-hotfixes 2018-06-26 18:36:29 +01:00
Matthew Hodgson
9175225adf Merge remote-tracking branch 'origin/hawkowl/sorteddict-api' into matrix-org-hotfixes 2018-06-26 17:52:37 +01:00
David Baker
7a32fa0101 Fix error on deleting users pending deactivation
Use simple_delete instead of simple_delete_one as commented
2018-06-26 11:57:44 +01:00
Erik Johnston
d46450195b Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-06-25 20:14:34 +01:00
Erik Johnston
c0128c1021 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-06-25 20:12:13 +01:00
Erik Johnston
3320b7c9a4 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-06-25 15:23:18 +01:00
Erik Johnston
4c22c9b0b6 Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes 2018-06-25 14:37:13 +01:00
Richard van der Hoff
6d6ea1bb40 Merge branch 'develop' into matrix-org-hotfixes 2018-06-22 16:35:37 +01:00
aphrodite
9e38981ae4 Send HTTP pushes directly to http-priv rather than via cloudflare
(This is a heinous hack that ought to be made more generic and pushed back to develop)
2018-06-22 15:58:15 +01:00
hera
463e7c2709 Lower member limiter 2018-06-22 15:58:15 +01:00
Richard van der Hoff
ce9d0b1d0c Fix earlier logging patch
`@cached` doesn't work on decorated functions, because it uses inspection on
the target to calculate the number of arguments.
2018-06-22 15:58:15 +01:00
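The failure mode is easy to demonstrate: a naive wrapper replaces the target's signature, so any decorator that inspects its target to count arguments sees only `*args, **kwargs`. A small illustration (not Synapse's `@cached` itself):

```python
import inspect


def logged(f):
    def wrapper(*args, **kwargs):  # this generic signature hides f's real one
        return f(*args, **kwargs)
    return wrapper


@logged
def get_users_in_room(room_id):
    ...


# An argument-counting cache decorator applied on top would now see no
# named parameters at all:
print(list(inspect.signature(get_users_in_room).parameters))
# -> ['args', 'kwargs']
```

Applying `functools.wraps` in the wrapper sets `__wrapped__`, which lets `inspect.signature` recover the original parameters.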
Richard van der Hoff
80786d5caf Logging for get_users_in_room 2018-06-22 15:58:15 +01:00
Richard van der Hoff
e18378c3e2 Increase member limiter to 20
Let's see if this makes the bridges go faster, or if it kills the synapse
master.
2018-06-22 15:58:15 +01:00
hera
0ca2857baa increase sync cache to 2 minutes
to give synchrotrons being hammered by repeating initial /syncs to get more
chance to actually complete and avoid a DoS
2018-06-22 15:58:15 +01:00
Erik Johnston
e21c312e16 Actually set cache factors in workers 2018-06-22 15:58:15 +01:00
Richard van der Hoff
1031bd25f8 Avoid doing presence updates on replication reconnect
Presence is supposed to be disabled on matrix.org, so we shouldn't send a load
of USER_SYNC commands every time the synchrotron reconnects to the master.
2018-06-22 15:58:15 +01:00
hera
fae708c0e8 Disable auth on room_members for now
because the moznet bridge is broken (https://github.com/matrix-org/matrix-appservice-irc/issues/506)
2018-06-22 15:58:15 +01:00
Erik Johnston
8f8ea91eef Bump LAST_SEEN_GRANULARITY in client_ips 2018-06-22 15:58:15 +01:00
Erik Johnston
7a1406d144 Prefill client_ip_last_seen in replication 2018-06-22 15:58:15 +01:00
Erik Johnston
6373874833 Move event sending to end in shutdown room admin api 2018-06-22 15:58:15 +01:00
Erik Johnston
a79823e64b Add dummy presence REST handler to frontend proxy
The handler no-ops all requests as presence is disabled.
2018-06-22 15:58:15 +01:00
Erik Johnston
1766a5fdc0 Increase MAX_EVENTS_BEHIND for replication clients 2018-06-22 15:58:14 +01:00
Erik Johnston
e6b1ea3eb2 Disable presence in txn queue 2018-06-22 15:58:14 +01:00
Erik Johnston
e5537cf983 Limit concurrent AS joins 2018-06-22 15:58:14 +01:00
Erik Johnston
43bb12e640 Disable presence
This reverts commit 0ebd376a53 and
disables presence a bit more
2018-06-22 15:58:14 +01:00
Erik Johnston
66dcbf47a3 Disable auto search for prefixes in event search 2018-06-22 15:58:14 +01:00
Erik Johnston
a285fe05fd Add timeout to ResponseCache of /public_rooms 2018-06-22 15:58:14 +01:00
229 changed files with 5403 additions and 7426 deletions


@@ -6,7 +6,12 @@
 set -ex
 apt-get update
-apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox
+apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev
+# workaround for https://github.com/jaraco/zipp/issues/40
+python3.5 -m pip install 'setuptools>=34.4.0'
+python3.5 -m pip install tox
 export LANG="C.UTF-8"


@@ -39,5 +39,3 @@ Server correctly handles incoming m.device_list_update
-# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
-Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
 Can get rooms/{roomId}/members at a given point


@@ -1,268 +1,11 @@
Synapse 1.12.0 (2020-03-23)
===========================
No significant changes since 1.12.0rc1.
Debian packages and Docker images are rebuilt using the latest versions of
dependency libraries, including Twisted 20.3.0. **Please see security advisory
below**.
Security advisory
-----------------
Synapse may be vulnerable to request-smuggling attacks when it is used with a
reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are
described in
[CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108)
and
[CVE-2020-10109](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10109).
For a good introduction to this class of request-smuggling attacks, see
https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
We are not aware of these vulnerabilities being exploited in the wild, and
do not believe that they are exploitable with current versions of any reverse
proxies. Nevertheless, we recommend that all Synapse administrators ensure that
they have the latest versions of the Twisted library to ensure that their
installation remains secure.
* Administrators using the [`matrix.org` Docker
image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
packages from
`matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
should ensure that they have version 1.12.0 installed: these images include
Twisted 20.3.0.
* Administrators who have [installed Synapse from
source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
should upgrade Twisted within their virtualenv by running:
```sh
<path_to_virtualenv>/bin/pip install 'Twisted>=20.3.0'
```
* Administrators who have installed Synapse from distribution packages should
consult the information from their distributions.
The `matrix.org` Synapse instance was not vulnerable to these vulnerabilities.
Advance notice of change to the default `git` branch for Synapse
----------------------------------------------------------------
Currently, the default `git` branch for Synapse is `master`, which tracks the
latest release.
After the release of Synapse 1.13.0, we intend to change this default to
`develop`, which is the development tip. This is more consistent with common
practice and modern `git` usage.
Although we try to keep `develop` in a stable state, there may be occasions
where regressions creep in. Developers and distributors who have scripts which
run builds using the default branch of `Synapse` should therefore consider
pinning their scripts to `master`.
Synapse 1.12.0rc1 (2020-03-19)
==============================
Features
--------
- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)):
  - Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. ([\#6965](https://github.com/matrix-org/synapse/issues/6965))
  - Validate the `alt_aliases` property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971))
  - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986))
  - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037))
  - Stop sending m.room.aliases events during room creation and upgrade. ([\#6941](https://github.com/matrix-org/synapse/issues/6941))
  - Synapse no longer uses room alias events to calculate room names for push notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966))
  - The room list endpoint no longer returns a list of aliases. ([\#6970](https://github.com/matrix-org/synapse/issues/6970))
  - Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. ([\#7034](https://github.com/matrix-org/synapse/issues/7034))
- Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. ([\#6315](https://github.com/matrix-org/synapse/issues/6315))
- Check that server_name is correctly set before running database updates. ([\#6982](https://github.com/matrix-org/synapse/issues/6982))
- Break down monthly active users by `appservice_id` and emit via Prometheus. ([\#7030](https://github.com/matrix-org/synapse/issues/7030))
- Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. ([\#7058](https://github.com/matrix-org/synapse/issues/7058), [\#7067](https://github.com/matrix-org/synapse/issues/7067))
- Add an optional parameter to control whether other sessions are logged out when a user's password is modified. ([\#7085](https://github.com/matrix-org/synapse/issues/7085))
- Add prometheus metrics for the number of active pushers. ([\#7103](https://github.com/matrix-org/synapse/issues/7103), [\#7106](https://github.com/matrix-org/synapse/issues/7106))
- Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. ([\#7094](https://github.com/matrix-org/synapse/issues/7094))
Bugfixes
--------
- When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. ([\#6572](https://github.com/matrix-org/synapse/issues/6572))
- Fix a couple of bugs in email configuration handling. ([\#6962](https://github.com/matrix-org/synapse/issues/6962))
- Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. ([\#6967](https://github.com/matrix-org/synapse/issues/6967))
- Fix `duplicate key` error which was logged when rejoining a room over federation. ([\#6968](https://github.com/matrix-org/synapse/issues/6968))
- Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. ([\#6990](https://github.com/matrix-org/synapse/issues/6990))
- Fix py35-old CI by using native tox package. ([\#7018](https://github.com/matrix-org/synapse/issues/7018))
- Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. ([\#7035](https://github.com/matrix-org/synapse/issues/7035))
- Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. ([\#7044](https://github.com/matrix-org/synapse/issues/7044))
- Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. ([\#7066](https://github.com/matrix-org/synapse/issues/7066))
- Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. ([\#7070](https://github.com/matrix-org/synapse/issues/7070))
- Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. ([\#7074](https://github.com/matrix-org/synapse/issues/7074))
Improved Documentation
----------------------
- Updated CentOS8 install instructions. Contributed by Richard Kellner. ([\#6925](https://github.com/matrix-org/synapse/issues/6925))
- Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. ([\#6984](https://github.com/matrix-org/synapse/issues/6984))
- Change date in [INSTALL.md](./INSTALL.md#tls-certificates) for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015))
- Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. ([\#7048](https://github.com/matrix-org/synapse/issues/7048))
Deprecations and Removals
-------------------------
- Remove the unused query_auth federation endpoint per [MSC2451](https://github.com/matrix-org/matrix-doc/pull/2451). ([\#7026](https://github.com/matrix-org/synapse/issues/7026))
Internal Changes
----------------
- Add type hints to `logging/context.py`. ([\#6309](https://github.com/matrix-org/synapse/issues/6309))
- Add some clarifications to `README.md` in the database schema directory. ([\#6615](https://github.com/matrix-org/synapse/issues/6615))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
- Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
- Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
- Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
- Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
- Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
- Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. ([\#6979](https://github.com/matrix-org/synapse/issues/6979))
- Update warning for incorrect database collation/ctype to include link to documentation. ([\#6985](https://github.com/matrix-org/synapse/issues/6985))
- Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987))
- Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019))
- Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995))
- Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020))
- Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045))
- Add type annotations and comments to the auth handler. ([\#7063](https://github.com/matrix-org/synapse/issues/7063))
Synapse 1.11.1 (2020-03-03)
===========================
This release includes a security fix impacting installations using Single Sign-On (i.e. SAML2 or CAS) for authentication. Administrators of such installations are encouraged to upgrade as soon as possible.
The release also includes fixes for a couple of other bugs.
Bugfixes
--------
- Add a confirmation step to the SSO login flow before redirecting users to the redirect URL. ([b2bd54a2](https://github.com/matrix-org/synapse/commit/b2bd54a2e31d9a248f73fadb184ae9b4cbdb49f9), [65c73cdf](https://github.com/matrix-org/synapse/commit/65c73cdfec1876a9fec2fd2c3a74923cd146fe0b), [a0178df1](https://github.com/matrix-org/synapse/commit/a0178df10422a76fd403b82d2b2a4ed28a9a9d1e))
- Fixed setting a user as an admin with the admin API `PUT /_synapse/admin/v2/users/<user_id>`. Contributed by @dklimpel. ([\#6910](https://github.com/matrix-org/synapse/issues/6910))
- Fix bug introduced in Synapse 1.11.0 which sometimes caused errors when joining rooms over federation, with `'coroutine' object has no attribute 'event_id'`. ([\#6996](https://github.com/matrix-org/synapse/issues/6996))
Synapse 1.11.0 (2020-02-21)
===========================
Improved Documentation
----------------------
- Small grammatical fixes to the ACME v1 deprecation notice. ([\#6944](https://github.com/matrix-org/synapse/issues/6944))
Synapse 1.11.0rc1 (2020-02-19)
==============================
Features
--------
- Admin API to add or modify threepids of user accounts. ([\#6769](https://github.com/matrix-org/synapse/issues/6769))
- Limit the number of events that can be requested by the backfill federation API to 100. ([\#6864](https://github.com/matrix-org/synapse/issues/6864))
- Add ability to run some group APIs on workers. ([\#6866](https://github.com/matrix-org/synapse/issues/6866))
- Reject device display names over 100 characters in length to prevent abuse. ([\#6882](https://github.com/matrix-org/synapse/issues/6882))
- Add ability to route federation user device queries to workers. ([\#6873](https://github.com/matrix-org/synapse/issues/6873))
- The result of a user directory search can now be filtered via the spam checker. ([\#6888](https://github.com/matrix-org/synapse/issues/6888))
- Implement new `GET /_matrix/client/unstable/org.matrix.msc2432/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
- Stop sending `m.room.aliases` events when adding/removing aliases. Check `alt_aliases` in the latest `m.room.canonical_alias` event when deleting an alias. ([\#6904](https://github.com/matrix-org/synapse/issues/6904))
- Change the default power levels of invites, tombstones and server ACLs for new rooms. ([\#6834](https://github.com/matrix-org/synapse/issues/6834))
Bugfixes
--------
- Fixed the third-party event rules function `on_create_room`'s return value being ignored. ([\#6781](https://github.com/matrix-org/synapse/issues/6781))
- Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
- Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
- Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
Updates to the Docker image
---------------------------
- The deprecated "generate-config-on-the-fly" mode is no longer supported. ([\#6918](https://github.com/matrix-org/synapse/issues/6918))
Improved Documentation
----------------------
- Add details of PR merge strategy to contributing docs. ([\#6846](https://github.com/matrix-org/synapse/issues/6846))
- Spell out that the last event sent to a room won't be deleted by a purge. ([\#6891](https://github.com/matrix-org/synapse/issues/6891))
- Update Synapse's documentation to warn about the deprecation of ACME v1. ([\#6905](https://github.com/matrix-org/synapse/issues/6905), [\#6907](https://github.com/matrix-org/synapse/issues/6907), [\#6909](https://github.com/matrix-org/synapse/issues/6909))
- Add documentation for the spam checker. ([\#6906](https://github.com/matrix-org/synapse/issues/6906))
- Fix the worker docs to point to the `/publicised_groups` API correctly. ([\#6938](https://github.com/matrix-org/synapse/issues/6938))
- Clean up and update docs on setting up federation. ([\#6940](https://github.com/matrix-org/synapse/issues/6940))
- Add a warning about indentation to generated configuration files. ([\#6920](https://github.com/matrix-org/synapse/issues/6920))
- Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. ([\#6921](https://github.com/matrix-org/synapse/issues/6921))
- Update pip install directions in readme to avoid error when using zsh. ([\#6855](https://github.com/matrix-org/synapse/issues/6855))
Deprecations and Removals
-------------------------
- Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. ([\#6877](https://github.com/matrix-org/synapse/issues/6877))
Internal Changes
----------------
- Add type hints to `SyncHandler`. ([\#6821](https://github.com/matrix-org/synapse/issues/6821))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6823](https://github.com/matrix-org/synapse/issues/6823), [\#6827](https://github.com/matrix-org/synapse/issues/6827), [\#6854](https://github.com/matrix-org/synapse/issues/6854), [\#6856](https://github.com/matrix-org/synapse/issues/6856), [\#6857](https://github.com/matrix-org/synapse/issues/6857), [\#6858](https://github.com/matrix-org/synapse/issues/6858))
- Fix stacktraces when using `ObservableDeferred` and async/await. ([\#6836](https://github.com/matrix-org/synapse/issues/6836))
- Port much of `synapse.handlers.federation` to async/await. ([\#6837](https://github.com/matrix-org/synapse/issues/6837), [\#6840](https://github.com/matrix-org/synapse/issues/6840))
- Populate `rooms.room_version` database column at startup, rather than in a background update. ([\#6847](https://github.com/matrix-org/synapse/issues/6847))
- Reduce amount we log at `INFO` level. ([\#6833](https://github.com/matrix-org/synapse/issues/6833), [\#6862](https://github.com/matrix-org/synapse/issues/6862))
- Remove unused `get_room_stats_state` method. ([\#6869](https://github.com/matrix-org/synapse/issues/6869))
- Add typing to `synapse.federation.sender` and port to async/await. ([\#6871](https://github.com/matrix-org/synapse/issues/6871))
- Refactor `_EventInternalMetadata` object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872))
- Add an additional entry to the SyTest blacklist for worker mode. ([\#6883](https://github.com/matrix-org/synapse/issues/6883))
- Fix the use of sed in the linting scripts when using BSD sed. ([\#6887](https://github.com/matrix-org/synapse/issues/6887))
- Add type hints to the spam checker module. ([\#6915](https://github.com/matrix-org/synapse/issues/6915))
- Convert the directory handler tests to use HomeserverTestCase. ([\#6919](https://github.com/matrix-org/synapse/issues/6919))
- Increase DB/CPU perf of `_is_server_still_joined` check. ([\#6936](https://github.com/matrix-org/synapse/issues/6936))
- Tiny optimisation for incoming HTTP request dispatch. ([\#6950](https://github.com/matrix-org/synapse/issues/6950))
Synapse 1.10.1 (2020-02-17)
===========================
Bugfixes
--------
- Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. ([\#6924](https://github.com/matrix-org/synapse/issues/6924))
Synapse 1.10.0 (2020-02-12)
===========================
**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details.
Updates to the Docker image
---------------------------
- Update the docker images to Alpine Linux 3.11. ([\#6897](https://github.com/matrix-org/synapse/issues/6897))
Synapse 1.10.0rc5 (2020-02-11)
==============================
Bugfixes
--------
- Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. ([\#6884](https://github.com/matrix-org/synapse/issues/6884))
Synapse 1.10.0rc4 (2020-02-11)
==============================
This release candidate was built incorrectly and is superseded by 1.10.0rc5.
Features
--------
- Filter out `m.room.aliases` from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884))
Synapse 1.10.0rc3 (2020-02-10)
==============================
@@ -270,7 +13,7 @@ Synapse 1.10.0rc3 (2020-02-10)
Features
--------
- Filter out `m.room.aliases` from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878))
- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878))
Internal Changes
@@ -298,6 +41,9 @@ Internal Changes
Synapse 1.10.0rc1 (2020-01-31)
==============================
**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details.
Features
--------


@@ -60,7 +60,7 @@ python 3.6 and to install each tool:
```
# Install the dependencies
pip install -U black flake8 flake8-comprehensions isort
pip install -U black flake8 isort
# Run the linter script
./scripts-dev/lint.sh
@@ -200,20 +200,6 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
## Merge Strategy
We use the commit history of develop/master extensively to identify
when regressions were introduced and what changes have been made.
We aim to have a clean merge history, which means we normally squash-merge
changes into develop. For small changes this means there is no need to rebase
to clean up your PR before merging. Larger changes with an organised set of
commits may be merged as-is, if the history is judged to be useful.
This use of squash-merging will mean PRs built on each other will be hard to
merge. We suggest avoiding these where possible, and if required, ensuring
each PR has a tidy set of commits to ease merging.
## Conclusion
That's it! Matrix is a very open and collaborative project as you might expect


@@ -124,21 +124,12 @@ sudo pacman -S base-devel python python-pip \
#### CentOS/Fedora
Installing prerequisites on CentOS 8 or Fedora>26:
```
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo dnf groupinstall "Development Tools"
```
Installing prerequisites on CentOS 7 or Fedora<=25:
Installing prerequisites on CentOS 7 or Fedora 25:
```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
python-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
```
@@ -397,17 +388,15 @@ Once you have installed synapse as above, you will need to configure it.
## TLS certificates
The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
but for any practical use, you will need Synapse's APIs to be served
over HTTPS.
The default configuration exposes a single HTTP port: http://localhost:8008. It
is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
For information on using a reverse proxy, see
[docs/reverse_proxy.md](docs/reverse_proxy.md).
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:
To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:
* First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
@@ -425,13 +414,10 @@ so, you will need to edit `homeserver.yaml`, as follows:
point these settings at an existing certificate and key, or you can
enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
for having Synapse automatically provision and renew federation
certificates through ACME can be found at [ACME.md](docs/ACME.md).
Note that, as pointed out in that document, this feature will not
work with installs set up after November 2019.
If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
(for instance, if using certbot, use `fullchain.pem` as your certificate, not
certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
are using your own certificate, be sure to use a `.pem` file that includes
the full certificate chain including any intermediate certificates (for
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).
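As a rough sketch, the relevant parts of `homeserver.yaml` might then look something like the following (the listener block mirrors the commented-out example in the generated config; the certificate paths are illustrative):

```
listeners:
  # TLS-enabled listener: for when matrix traffic is sent directly to synapse
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]

tls_certificate_path: "/etc/letsencrypt/live/example.com/fullchain.pem"
tls_private_key_path: "/etc/letsencrypt/live/example.com/privkey.pem"
```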
For a more detailed guide to configuring your server for federation, see


@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
virtualenv -p python3 env
source env/bin/activate
python -m pip install --no-use-pep517 -e ".[all]"
python -m pip install --no-use-pep517 -e .[all]
This will run a process of downloading and installing all the needed
dependencies into a virtual env.

changelog.d/6126.feature

@@ -0,0 +1 @@
Group events into larger federation transactions at times of high traffic.

changelog.d/6418.bugfix

@@ -0,0 +1 @@
Fix phone home stats reporting.

changelog.d/6866.feature

@@ -0,0 +1 @@
Add ability to run some group APIs on workers.

changelog.d/6873.feature

@@ -0,0 +1 @@
Add ability to route federation user device queries to workers.


@@ -15,9 +15,10 @@ services:
restart: unless-stopped
# See the readme for a full documentation of the environment settings
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
- SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
volumes:
# You may either store all the files in a local folder
- ./matrix-config/homeserver.yaml:/etc/homeserver.yaml
- ./files:/data
# .. or you may split this between different storage points
# - ./files:/data
@@ -55,9 +56,6 @@ services:
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data
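Putting the hunk above together, a minimal sketch of the resulting `db` service (image tag and password illustrative) would be:

```
db:
  image: docker.io/postgres:12-alpine
  environment:
    - POSTGRES_USER=synapse
    - POSTGRES_PASSWORD=changeme
    # ensure the database gets created correctly
    # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
    - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
  volumes:
    # You may store the database tables in a local folder..
    - ./schemas:/var/lib/postgresql/data
```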


@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules
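For step 1, a minimal Prometheus scrape job might look like the sketch below. This assumes you have configured a Synapse metrics listener on port 9090 as described in the metrics-howto document linked above; adjust the target to your setup.

```
scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["localhost:9090"]
```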


@@ -18,7 +18,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": 1,
"iteration": 1584612489167,
"iteration": 1561447718159,
"links": [
{
"asDropdown": true,
@@ -34,7 +34,6 @@
"panels": [
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -53,14 +52,12 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 1
},
"hiddenSeries": false,
"id": 75,
"legend": {
"avg": false,
@@ -75,9 +72,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -156,7 +151,6 @@
"editable": true,
"error": false,
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 9,
@@ -164,7 +158,6 @@
"x": 12,
"y": 1
},
"hiddenSeries": false,
"id": 33,
"legend": {
"avg": false,
@@ -179,9 +172,7 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -311,14 +302,12 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 10
},
"hiddenSeries": false,
"id": 107,
"legend": {
"avg": false,
@@ -333,9 +322,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -438,14 +425,12 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 19
},
"hiddenSeries": false,
"id": 118,
"legend": {
"avg": false,
@@ -460,9 +445,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -559,7 +542,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -1379,7 +1361,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -1751,7 +1732,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2459,7 +2439,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2656,7 +2635,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -2672,12 +2650,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 33
"y": 61
},
"id": 79,
"legend": {
@@ -2693,9 +2670,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -2710,13 +2684,8 @@
"expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "successful txn rate",
"legendFormat": "txn rate",
"refId": "A"
},
{
"expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
"legendFormat": "failed txn rate",
"refId": "B"
}
],
"thresholds": [],
@@ -2767,12 +2736,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 33
"y": 61
},
"id": 83,
"legend": {
@@ -2788,9 +2756,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -2864,12 +2829,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 42
"y": 70
},
"id": 109,
"legend": {
@@ -2885,9 +2849,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -2962,12 +2923,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 42
"y": 70
},
"id": 111,
"legend": {
@@ -2983,9 +2943,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -3052,7 +3009,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -3068,14 +3024,12 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"h": 7,
"w": 12,
"x": 0,
"y": 34
"y": 62
},
"hiddenSeries": false,
"id": 51,
"legend": {
"avg": false,
@@ -3090,9 +3044,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -3161,95 +3112,6 @@
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"description": "",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 34
},
"hiddenSeries": false,
"id": 134,
"legend": {
"avg": false,
"current": false,
"hideZero": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})",
"legendFormat": "{{kind}} {{app_id}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Active pusher instances by app",
"tooltip": {
"shared": false,
"sort": 2,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"repeat": null,
@@ -3258,7 +3120,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -3662,7 +3523,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -3680,7 +3540,6 @@
"editable": true,
"error": false,
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 13,
@@ -3703,9 +3562,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -3774,7 +3630,6 @@
"editable": true,
"error": false,
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 13,
@@ -3797,9 +3652,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -3868,7 +3720,6 @@
"editable": true,
"error": false,
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 13,
@@ -3891,9 +3742,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -3962,7 +3810,6 @@
"editable": true,
"error": false,
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 13,
@@ -3985,9 +3832,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -4077,7 +3921,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -4167,7 +4010,6 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -4234,7 +4076,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -4699,7 +4540,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5220,7 +5060,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5240,7 +5079,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 39
"y": 67
},
"id": 2,
"legend": {
@@ -5256,7 +5095,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5360,7 +5198,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 39
"y": 67
},
"id": 41,
"legend": {
@@ -5376,7 +5214,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5449,7 +5286,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 46
"y": 74
},
"id": 42,
"legend": {
@@ -5465,7 +5302,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5537,7 +5373,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 46
"y": 74
},
"id": 43,
"legend": {
@@ -5553,7 +5389,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5625,7 +5460,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 53
"y": 81
},
"id": 113,
"legend": {
@@ -5641,7 +5476,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5712,7 +5546,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 53
"y": 81
},
"id": 115,
"legend": {
@@ -5728,7 +5562,6 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5740,7 +5573,7 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"expr": "rate(synapse_replication_tcp_protocol_close_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{reason_type}}",
@@ -5795,7 +5628,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -5811,12 +5643,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 40
"y": 13
},
"id": 67,
"legend": {
@@ -5832,9 +5663,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5850,7 +5679,7 @@
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{name}}",
"legendFormat": "{{job}}-{{index}} ",
"refId": "A"
}
],
@@ -5902,12 +5731,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 40
"y": 13
},
"id": 71,
"legend": {
@@ -5923,9 +5751,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -5993,12 +5819,11 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 49
"y": 22
},
"id": 121,
"interval": "",
@@ -6015,9 +5840,7 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
"options": {
"dataLinks": []
},
"options": {},
"paceLength": 10,
"percentage": false,
"pointradius": 5,
@@ -6086,7 +5909,6 @@
},
{
"collapsed": true,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
@@ -6785,7 +6607,7 @@
}
],
"refresh": "5m",
"schemaVersion": 22,
"schemaVersion": 18,
"style": "dark",
"tags": [
"matrix"
@@ -6794,7 +6616,7 @@
"list": [
{
"current": {
"selected": true,
"tags": [],
"text": "Prometheus",
"value": "Prometheus"
},
@@ -6816,7 +6638,6 @@
"auto_count": 100,
"auto_min": "30s",
"current": {
"selected": false,
"text": "auto",
"value": "$__auto_interval_bucket_size"
},
@@ -6898,9 +6719,9 @@
"allFormat": "regex wildcard",
"allValue": "",
"current": {
"text": "synapse",
"text": "All",
"value": [
"synapse"
"$__all"
]
},
"datasource": "$datasource",
@@ -6930,9 +6751,7 @@
"allValue": ".*",
"current": {
"text": "All",
"value": [
"$__all"
]
"value": "$__all"
},
"datasource": "$datasource",
"definition": "",
@@ -6991,5 +6810,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
"version": 19
"version": 10
}

debian/changelog

@@ -1,33 +1,3 @@
matrix-synapse-py3 (1.12.0) stable; urgency=medium
* New synapse release 1.12.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 23 Mar 2020 12:13:03 +0000
matrix-synapse-py3 (1.11.1) stable; urgency=medium
* New synapse release 1.11.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Mar 2020 15:01:22 +0000
matrix-synapse-py3 (1.11.0) stable; urgency=medium
* New synapse release 1.11.0.
-- Synapse Packaging team <packages@matrix.org> Fri, 21 Feb 2020 08:54:34 +0000
matrix-synapse-py3 (1.10.1) stable; urgency=medium
* New synapse release 1.10.1.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Feb 2020 16:27:28 +0000
matrix-synapse-py3 (1.10.0) stable; urgency=medium
* New synapse release 1.10.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 12 Feb 2020 12:18:54 +0000
matrix-synapse-py3 (1.9.1) stable; urgency=medium
* New synapse release 1.9.1.


@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
# install the OS build deps


@@ -110,12 +110,12 @@ argument to `docker run`.
## Legacy dynamic configuration file support
The docker image used to support creating a dynamic configuration file based
on environment variables. This is no longer supported, and an error will be
raised if you try to run synapse without a config file.
For backwards-compatibility only, the docker image supports creating a dynamic
configuration file based on environment variables. This is now deprecated, but
is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is
not given).
It is, however, possible to generate a static configuration file based on
the environment variables that were previously used. To do this, run the docker
To migrate from a dynamic configuration file to a static one, run the docker
container once with the environment variables set, and `migrate_config`
command line option. For example:
@@ -127,20 +127,15 @@ docker run -it --rm \
matrixdotorg/synapse:latest migrate_config
```
This will generate the same configuration file as the legacy mode used, and
will store it in `/data/homeserver.yaml`. You can then use it as shown above at
[Running synapse](#running-synapse).
Note that the defaults used in this configuration file may be different to
those when generating a new config file with `generate`: for example, TLS is
enabled by default in this mode. You are encouraged to inspect the generated
configuration file and edit it to ensure it meets your needs.
This will generate the same configuration file as the legacy mode used, but
will store it in `/data/homeserver.yaml` instead of a temporary location. You
can then use it as shown above at [Running synapse](#running-synapse).
## Building the image
If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:
```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```


@@ -188,6 +188,11 @@ def main(args, environ):
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
log(
"Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
% (os.getuid(), os.getgid(), desired_uid, desired_gid)
)
if ownership is None:
log("Will not perform chmod/su-exec as UserID already matches request")
@@ -208,30 +213,38 @@ def main(args, environ):
if mode is not None:
error("Unknown execution mode '%s'" % (mode,))
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
if "SYNAPSE_SERVER_NAME" in environ:
# backwards-compatibility generate-a-config-on-the-fly mode
if "SYNAPSE_CONFIG_PATH" in environ:
error(
"""\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
"SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
"in `generate` or `migrate_config` mode. To start synapse using a "
"config file, unset the SYNAPSE_SERVER_NAME environment variable."
)
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
config_path = "/compiled/homeserver.yaml"
log(
"Generating config file '%s' on-the-fly from environment variables.\n"
"Note that this mode is deprecated. You can migrate to a static config\n"
"file by running with 'migrate_config'. See the README for more details."
% (config_path,)
)
generate_config_from_template("/compiled", config_path, environ, ownership)
else:
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
if not os.path.exists(config_path):
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
log("Starting synapse with config file " + config_path)
args = ["python", "-m", synapse_worker, "--config-path", config_path]


@@ -1,4 +1,4 @@
# This file is maintained as an up-to-date snapshot of the default
# The config is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
@@ -10,5 +10,3 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.
################################################################################


@@ -1,48 +1,12 @@
# ACME
From version 1.0 (June 2019) onwards, Synapse requires valid TLS
certificates for communication between servers (by default on port
`8448`) in addition to those that are client-facing (port `443`). To
help homeserver admins fulfil this new requirement, Synapse v0.99.0
introduced support for automatically provisioning certificates through
[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol.
## Deprecation of ACME v1
In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
Let's Encrypt announced that they were deprecating version 1 of the ACME
protocol, with the plan to disable the use of it for new accounts in
November 2019, and for existing accounts in June 2020.
Synapse doesn't currently support version 2 of the ACME protocol, which
means that:
* for existing installs, Synapse's built-in ACME support will continue
to work until June 2020.
* for new installs, this feature will not work at all.
Either way, it is recommended to move from Synapse's ACME support
feature to an external automated tool such as [certbot](https://github.com/certbot/certbot)
(or browse [this list](https://letsencrypt.org/fr/docs/client-options/)
for an alternative ACME client).
It's also recommended to use a reverse proxy for the server-facing
communications (more documentation about this can be found
[here](/docs/reverse_proxy.md)) as well as the client-facing ones and
have it serve the certificates.
In case you can't do that and need Synapse to serve them itself, make
sure to set the `tls_certificate_path` configuration setting to the path
of the certificate (make sure to use the certificate containing the full
certification chain, e.g. `fullchain.pem` if using certbot) and
`tls_private_key_path` to the path of the matching private key. Note
that in this case you will need to restart Synapse after each
certificate renewal so that Synapse stops using the old certificate.
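If you take this route, remember the restart after each renewal. With certbot, one way to automate it is a deploy hook along the lines of the sketch below (the service name `matrix-synapse` matches the Debian packages and is an assumption; adjust for your installation):

```
# illustrative only: restart Synapse whenever certbot deploys a renewed certificate
certbot renew --deploy-hook "systemctl restart matrix-synapse"
```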
If you still want to use Synapse's built-in ACME support, the rest of
this document explains how to set it up.
## Initial setup
Synapse v1.0 will require valid TLS certificates for communication between
servers (port `8448` by default) in addition to those that are client-facing
(port `443`). If you do not already have a valid certificate for your domain,
the easiest way to get one is with Synapse's new ACME support, which will use
the ACME protocol to provision a certificate automatically. Synapse v0.99.0+
will provision server-to-server certificates automatically for you for free
through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
@@ -68,6 +32,11 @@ If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
already have a valid certificate for your homeserver's domain, that can be
placed in Synapse's config directory without the need for any ACME setup.
## ACME setup
The main steps for enabling ACME support in short summary are:


@@ -8,9 +8,6 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.
Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
The API is:
``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
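For example, a request purging history up to a given event might look like the following sketch (the access token, room ID and event ID are placeholders, and the room ID will need URL-encoding):

.. code:: bash

    curl -X POST \
      -H "Authorization: Bearer <admin_access_token>" \
      "https://example.com/_synapse/admin/v1/purge_history/<room_id>/<event_id>"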


@@ -2,8 +2,7 @@ Create or modify Account
========================
This API allows an administrator to create or modify a user account with a
specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
``@user:server.com``.
specific ``user_id``.
This api is::
@@ -16,16 +15,6 @@ with a body of:
{
"password": "user_password",
"displayname": "User",
"threepids": [
{
"medium": "email",
"address": "<user_mail_1>"
},
{
"medium": "email",
"address": "<user_mail_2>"
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false
@@ -34,11 +23,9 @@ with a body of:
including an ``access_token`` of a server admin.
The parameter ``displayname`` is optional and defaults to ``user_id``.
The parameter ``threepids`` is optional.
The parameter ``avatar_url`` is optional.
The parameter ``admin`` is optional and defaults to 'false'.
The parameter ``deactivated`` is optional and defaults to 'false'.
The parameter ``password`` is optional. If provided the user's password is updated and all devices are logged out.
If the user already exists then optional parameters default to the current value.
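Putting this together, a request creating or updating the example account might look like the sketch below (the access token is a placeholder, and the user ID may need URL-encoding, e.g. ``%40user%3Aserver.com``):

.. code:: bash

    curl -X PUT \
      -H "Authorization: Bearer <admin_access_token>" \
      -H "Content-Type: application/json" \
      -d '{"password": "user_password", "displayname": "User", "admin": false}' \
      "https://example.com/_synapse/admin/v2/users/@user:server.com"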
List Accounts
@@ -169,14 +156,11 @@ with a body of:
.. code:: json
{
"new_password": "<secret>",
"logout_devices": true,
"new_password": "<secret>"
}
including an ``access_token`` of a server admin.
The parameter ``new_password`` is required.
The parameter ``logout_devices`` is optional and defaults to ``true``.
Get whether a user is a server administrator or not
===================================================


@@ -30,7 +30,7 @@ The necessary tools are detailed below.
Install `flake8` with:
pip install --upgrade flake8 flake8-comprehensions
pip install --upgrade flake8
Check all application and test code with:


@@ -1,94 +0,0 @@
# Delegation
By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
to `example.com` (so that your user names look like `@user:example.com`),
other servers will try to connect to yours at `https://example.com:8448/`.
Delegation is a Matrix feature allowing a homeserver admin to retain a
`server_name` of `example.com` so that user IDs, room aliases, etc continue
to look like `*:example.com`, whilst having federation traffic routed
to a different server and/or port (e.g. `synapse.example.com:443`).
## .well-known delegation
To use this method, you need to be able to alter the
`server_name` 's https server to serve the `/.well-known/matrix/server`
URL. Having an active server (with a valid TLS certificate) serving your
`server_name` domain is out of the scope of this documentation.
The URL `https://<server_name>/.well-known/matrix/server` should
return a JSON structure containing the key `m.server` like so:
```json
{
"m.server": "<synapse.server.name>[:<yourport>]"
}
```
In our example, this would mean that URL `https://example.com/.well-known/matrix/server`
should return:
```json
{
"m.server": "synapse.example.com:443"
}
```
Note, specifying a port is optional. If no port is specified, then it defaults
to 8448.
With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).
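You can sanity-check a .well-known setup from the command line:

```
# should print something like {"m.server": "synapse.example.com:443"}
curl https://example.com/.well-known/matrix/server
```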
## SRV DNS record delegation
It is also possible to do delegation using a SRV DNS record. However, that is
considered an advanced topic since it's a bit complex to set up, and `.well-known`
delegation is already enough in most cases.
However, if you really need it, you can find some documentation on how such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
## Delegation FAQ
### When do I need delegation?
If your homeserver's APIs are accessible on the default federation port (8448)
and the domain your `server_name` points to, you do not need any delegation.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host, giving it a `server_name`
of `example.com`, and once a reverse proxy has been set up to proxy all requests
sent to the port `8448` and serve TLS certificates for `example.com`, you
wouldn't need any delegation set up.
**However**, if your homeserver's APIs aren't accessible on port 8448 and on the
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.
### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.
In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.
### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.


@@ -1,41 +1,163 @@
Setting up federation
Setting up Federation
=====================
Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.
The `server_name` configured in the Synapse configuration file (often
`homeserver.yaml`) defines how resources (users, rooms, etc.) will be
identified (eg: `@user:example.com`, `#room:example.com`). By default,
it is also the domain that other servers will use to try to reach your
server (via port 8448). This is easy to set up and will work provided
you set the `server_name` to match your machine's public DNS hostname.
For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions
on how to correctly set one up.
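As an illustrative sketch only (see [reverse_proxy.md](reverse_proxy.md) for the full recommended configurations), an nginx server block terminating TLS on the federation port might look like:

```
server {
    listen 8448 ssl;
    server_name example.com;

    ssl_certificate /path/to/fullchain.pem;
    ssl_certificate_key /path/to/privkey.pem;

    location / {
        # forward to Synapse's plain-HTTP listener
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
}
```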
In some cases you might not want to run Synapse on the machine that has
the `server_name` as its public DNS hostname, or you might want federation
traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.
The ``server_name`` configured in the Synapse configuration file (often
``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
identified (eg: ``@user:example.com``, ``#room:example.com``). By
default, it is also the domain that other servers will use to
try to reach your server (via port 8448). This is easy to set
up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.
Once federation has been configured, you should be able to join a room over
federation. A good place to start is `#synapse:matrix.org` - a room for
federation. A good place to start is ``#synapse:matrix.org`` - a room for
Synapse admins.
## Delegation
For a more flexible configuration, you can have ``server_name``
resources (eg: ``@user:example.com``) served by a different host and
port (eg: ``synapse.example.com:443``). There are two ways to do this:
- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
- adding a DNS ``SRV`` record in the DNS zone of domain
``example.com``.
Without configuring delegation, the matrix federation will
expect to find your server via ``example.com:8448``. The following methods
allow you retain a `server_name` of `example.com` so that your user IDs, room
aliases, etc continue to look like `*:example.com`, whilst having your
federation traffic routed to a different server.
### .well-known delegation
To use this method, you need to be able to alter the
``server_name`` 's https server to serve the ``/.well-known/matrix/server``
URL. Having an active server (with a valid TLS certificate) serving your
``server_name`` domain is out of the scope of this documentation.
The URL ``https://<server_name>/.well-known/matrix/server`` should
return a JSON structure containing the key ``m.server`` like so:
{
"m.server": "<synapse.server.name>[:<yourport>]"
}
In our example, this would mean that URL ``https://example.com/.well-known/matrix/server``
should return:
{
"m.server": "synapse.example.com:443"
}
Note, specifying a port is optional. If a port is not specified an SRV lookup
is performed, as described below. If the target of the
delegation does not have an SRV record, then the port defaults to 8448.
Most installations will not need to configure .well-known. However, it can be
useful in cases where the admin is hosting on behalf of someone else and
therefore cannot gain access to the necessary certificate. With .well-known,
federation servers will check for a valid TLS certificate for the delegated
hostname (in our example: ``synapse.example.com``).
### DNS SRV delegation
To use this delegation method, you need to have write access to your
``server_name`` 's domain zone DNS records (in our example it would be
``example.com`` DNS zone).
This method requires the target server to provide a
valid TLS certificate for the original ``server_name``.
You need to add a SRV record in your ``server_name`` 's DNS zone with
this format:
_matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
In our example, we would need to add this SRV record in the
``example.com`` DNS zone:
_matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
Once done and set up, you can check the DNS record with ``dig -t srv
_matrix._tcp.<server_name>``. In our example, we would expect this:
$ dig -t srv _matrix._tcp.example.com
_matrix._tcp.example.com. 3600 IN SRV 10 0 443 synapse.example.com.
Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.
### Delegation FAQ
#### When do I need a SRV record or .well-known URI?
If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.
#### I have created a .well-known URI. Do I also need an SRV record?
No. You can use either `.well-known` delegation or use an SRV record for delegation. You
do not need to use both to delegate to the same location.
#### Can I manage my own certificates rather than having Synapse renew certificates itself?
Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation endpoints.
#### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
Practically speaking, this is no longer necessary.
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.
#### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.
## Troubleshooting
You can use the [federation tester](https://matrix.org/federationtester)
to check if your homeserver is configured correctly. Alternatively try the
[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace `DOMAIN` with your
`server_name`. Hitting the API directly provides extra detail.
You can use the [federation tester](
<https://matrix.org/federationtester>) to check if your homeserver is
configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace ``DOMAIN`` with your
``server_name``. Hitting the API directly provides extra detail.
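For example, to query the report API directly from a shell (replace `example.com` with your `server_name`):

```
curl "https://matrix.org/federationtester/api/report?server_name=example.com"
```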
The typical failure mode for federation is that when the server tries to join
a room, it is rejected with "401: Unauthorized". Generally this means that other
@@ -47,8 +169,8 @@ you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.
## Running a demo federation of Synapses
## Running a Demo Federation of Synapses
If you want to get up and running quickly with a trio of homeservers in a
private federation, there is a script in the `demo` directory. This is mainly
private federation, there is a script in the ``demo`` directory. This is mainly
useful just for development purposes. See [demo/README](<../demo/README>).


@@ -42,10 +42,6 @@ purged according to its room's policy, then the receiving server will
process and store that event until it's picked up by the next purge job,
though it will always hide it from clients.
Synapse requires at least one message in each room, so it will never
delete the last message in a room. It will, however, hide it from
clients.
## Server configuration


@@ -18,10 +18,9 @@ When setting up a reverse proxy, remember that Matrix clients and other
Matrix servers do not necessarily need to connect to your server via the
same server name or port. Indeed, clients will use port 443 by default,
whereas servers default to port 8448. Where these are different, we
refer to the 'client port' and the \'federation port\'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
refer to the 'client port' and the \'federation port\'. See [Setting
up federation](federate.md) for more details of the algorithm used for
federation connections.
Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at


@@ -1,4 +1,4 @@
# This file is maintained as an up-to-date snapshot of the default
# The config is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
@@ -10,16 +10,6 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.
################################################################################
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
## Server ##
# The domain name of the server, with optional explicit port.
@@ -476,11 +466,6 @@ retention:
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
# ACME v2. This means that this feature currently won't work with installs set
# up after November 2019. For more info, and alternative solutions, see
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
#
# Note that provisioning a certificate in this way requires port 80 to be
# routed to Synapse so that it can complete the http-01 ACME challenge.
# By default, if you enable ACME support, Synapse will attempt to listen on
@@ -1347,25 +1332,6 @@ saml2_config:
#
#grandfathered_mxid_source_attribute: upn
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page to display to users if something goes wrong during the
# authentication process: 'saml_error.html'.
#
# This template doesn't currently need any variable to render.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
# Enable CAS for registration and login.
@@ -1379,56 +1345,6 @@ saml2_config:
# # name: value
# Additional settings to use with single sign-on systems such as SAML2 and CAS.
#
sso:
# A list of client URLs which are whitelisted so that the user does not
# have to confirm giving access to their account to the URL. Any client
# whose URL starts with an entry in the following list will not be subject
# to an additional confirmation step after the SSO login is completed.
#
# WARNING: An entry such as "https://my.client" is insecure, because it
# will also match "https://my.client.evil.site", exposing your users to
# phishing attacks from evil.site. To avoid this, include a slash after the
# hostname: "https://my.client/".
#
# By default, this list is empty.
#
#client_whitelist:
# - https://riot.im/develop
# - https://my.custom.client/
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page for a confirmation step before redirecting back to the client
# with the login token: 'sso_redirect_confirm.html'.
#
# When rendering, this template is given three variables:
# * redirect_url: the URL the user is about to be redirected to. Needs
# manual escaping (see
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
# * display_url: the same as `redirect_url`, but with the query
# parameters stripped. The intention is to have a
# human-readable URL to show to users, not to use it as
# the final address to redirect to. Needs manual escaping
# (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
# * server_name: the homeserver's name.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
# The JWT needs to contain a globally unique "sub" (subject) claim.
#
#jwt_config:
@@ -1478,6 +1394,10 @@ email:
#
#require_transport_security: true
# Enable sending emails for messages that the user has missed
#
#enable_notifs: false
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
@@ -1495,11 +1415,6 @@ email:
#
#app_name: my_branded_matrix_server
# Uncomment the following to enable sending emails for messages that the user
# has missed. Disabled by default.
#
#enable_notifs: true
# Uncomment the following to disable automatic subscription to email
# notifications for new users. Enabled by default.
#
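Taken together, a minimal notification-email setup might look like the sketch
below. The SMTP host and addresses are placeholders, and `smtp_host`/`smtp_port`
are assumed companion options from the start of this section:

```yaml
email:
  # Assumed companion options; adjust for your mail provider.
  smtp_host: smtp.example.com
  smtp_port: 587
  require_transport_security: true
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
  app_name: my_branded_matrix_server
  enable_notifs: true
```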

View File

@@ -1,88 +0,0 @@
# Handling spam in Synapse
Synapse has support to customize spam checking behavior. It can plug into a
variety of events and affect how they are presented to users on your homeserver.
The spam checking behavior is implemented as a Python class, which must be
able to be imported by the running Synapse.
## Python spam checker class
The Python class is instantiated with two objects:
* Any configuration (see below).
* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
It then implements methods which return a boolean to alter behavior in Synapse.
There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
* `user_may_invite`
* `user_may_create_room`
* `user_may_create_room_alias`
* `user_may_publish_room`
The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `SpamCheckerApi` class provides a way for the custom spam checker class to
call back into the homeserver internals. It currently implements the following
methods:
* `get_state_events_in_room`
### Example
```python
class ExampleSpamChecker:
    def __init__(self, config, api):
        self.config = config
        self.api = api

    def check_event_for_spam(self, foo):
        return False  # allow all events

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        return True  # allow all invites

    def user_may_create_room(self, userid):
        return True  # allow all room creations

    def user_may_create_room_alias(self, userid, room_alias):
        return True  # allow all room aliases

    def user_may_publish_room(self, userid, room_id):
        return True  # allow publishing of all rooms

    def check_username_for_spam(self, user_profile):
        return False  # allow all usernames
```
## Configuration
Modify the `spam_checker` section of your `homeserver.yaml` in the following
manner:
`module` should point to the fully qualified Python class that implements your
custom logic, e.g. `my_module.ExampleSpamChecker`.
`config` is a dictionary that gets passed to the spam checker class.
### Example
This section might look like:
```yaml
spam_checker:
  module: my_module.ExampleSpamChecker
  config:
    # Enable or disable a specific option in ExampleSpamChecker.
    my_custom_option: true
```
## Examples
The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged
example using the Synapse spam checking API, including a bot for dynamic
configuration.

View File

@@ -261,8 +261,7 @@ following regular expressions:
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$
Additionally, the following REST endpoints can be handled for GET requests:
@@ -273,7 +272,6 @@ Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:
^/_matrix/client/(r0|unstable)/register$
^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
Pagination requests can also be handled, but all requests for the same
room must be routed to the same instance. Additionally, care must be taken to
@@ -289,8 +287,8 @@ the following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
When using this worker you must also set `update_user_directory: False` in the
shared configuration file to stop the main synapse running background
jobs related to updating the user directory.
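For reference, such a worker is pointed at its own configuration file; a
sketch for a user-directory worker of this vintage (the ports and replication
host are placeholders):

```yaml
worker_app: synapse.app.user_dir

# TCP replication connection back to the main process (placeholder values).
worker_replication_host: 127.0.0.1
worker_replication_port: 9092

worker_listeners:
  - type: http
    port: 8086
    resources:
      - names: [client]
```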
### `synapse.app.frontend_proxy`

View File

@@ -3,8 +3,7 @@
# Exits with 0 if there are no problems, or another code otherwise.
# Fix non-lowercase true/false values
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
rm docs/sample_config.yaml.bak
sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
# Check if anything changed
git diff --exit-code docs/sample_config.yaml

View File

@@ -103,7 +103,7 @@ def main():
yaml.safe_dump(result, sys.stdout, default_flow_style=False)
rows = [row for server, json in result.items() for row in rows_v2(server, json)]
rows = list(row for server, json in result.items() for row in rows_v2(server, json))
cursor = connection.cursor()
cursor.executemany(

View File

@@ -1,31 +1,20 @@
name: matrix-synapse
base: core18
version: git
summary: Reference Matrix homeserver
description: |
Synapse is the reference Matrix homeserver.
Matrix is a federated and decentralised instant messaging and VoIP system.
grade: stable
confinement: strict
apps:
matrix-synapse:
command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
stop-command: synctl -c $SNAP_COMMON stop
plugs: [network-bind, network]
daemon: simple
hash-password:
command: hash_password
generate-config:
command: generate_config
generate-signing-key:
command: generate_signing_key.py
register-new-matrix-user:
command: register_new_matrix_user
plugs: [network]
synctl:
command: synctl
daemon: simple
parts:
matrix-synapse:
source: .

View File

@@ -36,7 +36,7 @@ try:
except ImportError:
pass
__version__ = "1.12.0"
__version__ = "1.10.0rc4"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import logging
from typing import Optional
from six import itervalues
@@ -36,7 +35,6 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.config.server import is_threepid_reserved
from synapse.events import EventBase
from synapse.types import StateMap, UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches.lrucache import LruCache
@@ -94,34 +92,20 @@ class Auth(object):
)
@defer.inlineCallbacks
def check_user_in_room(
self,
room_id: str,
user_id: str,
current_state: Optional[StateMap[EventBase]] = None,
allow_departed_users: bool = False,
):
"""Check if the user is in the room, or was at some point.
def check_joined_room(self, room_id, user_id, current_state=None):
"""Check if the user is currently joined in the room
Args:
room_id: The room to check.
user_id: The user to check.
current_state: Optional map of the current state of the room.
room_id(str): The room to check.
user_id(str): The user to check.
current_state(dict): Optional map of the current state of the room.
If provided then that map is used to check whether they are a
member of the room. Otherwise the current membership is
loaded from the database.
allow_departed_users: if True, accept users that were previously
members but have now departed.
Raises:
AuthError if the user is/was not in the room.
AuthError if the user is not in the room.
Returns:
Deferred[Optional[EventBase]]:
Membership event for the user if the user was in the
room. This will be the join event if they are currently joined to
the room. This will be the leave event if they have left the room.
A deferred membership event for the user if the user is in
the room.
"""
if current_state:
member = current_state.get((EventTypes.Member, user_id), None)
@@ -129,19 +113,37 @@ class Auth(object):
member = yield self.state.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
self._check_joined_room(member, user_id, room_id)
return member
@defer.inlineCallbacks
def check_user_was_in_room(self, room_id, user_id):
"""Check if the user was in the room at some point.
Args:
room_id(str): The room to check.
user_id(str): The user to check.
Raises:
AuthError if the user was never in the room.
Returns:
A deferred membership event for the user if the user was in the
room. This will be the join event if they are currently joined to
the room. This will be the leave event if they have left the room.
"""
member = yield self.state.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None
if membership == Membership.JOIN:
return member
if membership not in (Membership.JOIN, Membership.LEAVE):
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
# XXX this looks totally bogus. Why do we not allow users who have been banned,
# or those who were members previously and have been re-invited?
if allow_departed_users and membership == Membership.LEAVE:
if membership == Membership.LEAVE:
forgot = yield self.store.did_forget(user_id, room_id)
if not forgot:
return member
if forgot:
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
return member
@defer.inlineCallbacks
def check_host_in_room(self, room_id, host):
@@ -149,6 +151,12 @@ class Auth(object):
latest_event_ids = yield self.store.is_host_joined(room_id, host)
return latest_event_ids
def _check_joined_room(self, member, user_id, room_id):
if not member or member.membership != Membership.JOIN:
raise AuthError(
403, "User %s not in room %s (%s)" % (user_id, room_id, repr(member))
)
def can_federate(self, event, auth_events):
creation_event = auth_events.get((EventTypes.Create, ""))
@@ -538,13 +546,13 @@ class Auth(object):
return defer.succeed(auth_ids)
@defer.inlineCallbacks
def check_can_change_room_list(self, room_id: str, user: UserID):
"""Determine whether the user is allowed to edit the room's entry in the
def check_can_change_room_list(self, room_id, user):
"""Check if the user is allowed to edit the room's entry in the
published room list.
Args:
room_id
user
room_id (str)
user (UserID)
"""
is_admin = yield self.is_server_admin(user)
@@ -552,11 +560,11 @@ class Auth(object):
return True
user_id = user.to_string()
yield self.check_user_in_room(room_id, user_id)
yield self.check_joined_room(room_id, user_id)
# We currently require the user is a "moderator" in the room. We do this
# by checking if they would (theoretically) be able to change the
# m.room.canonical_alias events
# m.room.aliases events
power_level_event = yield self.state.get_current_state(
room_id, EventTypes.PowerLevels, ""
)
@@ -566,11 +574,16 @@ class Auth(object):
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
send_level = event_auth.get_send_level(
EventTypes.CanonicalAlias, "", power_level_event
EventTypes.Aliases, "", power_level_event
)
user_level = event_auth.get_user_power_level(user_id, auth_events)
return user_level >= send_level
if user_level < send_level:
raise AuthError(
403,
"This server requires you to be a moderator in the room to"
" edit its room list entry",
)
@staticmethod
def has_access_token(request):
@@ -620,18 +633,10 @@ class Auth(object):
return query_params[0].decode("ascii")
@defer.inlineCallbacks
def check_user_in_room_or_world_readable(
self, room_id: str, user_id: str, allow_departed_users: bool = False
):
def check_in_room_or_world_readable(self, room_id, user_id):
"""Checks that the user is or was in the room or the room is world
readable. If it isn't then an exception is raised.
Args:
room_id: room to check
user_id: user to check
allow_departed_users: if True, accept users that were previously
members but have now departed
Returns:
Deferred[tuple[str, str|None]]: Resolves to the current membership of
the user in the room and the membership event ID of the user. If
@@ -640,14 +645,12 @@ class Auth(object):
"""
try:
# check_user_in_room will return the most recent membership
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_in_room(
room_id, user_id, allow_departed_users=allow_departed_users
)
member_event = yield self.check_user_was_in_room(room_id, user_id)
return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state.get_current_state(
@@ -659,9 +662,7 @@ class Auth(object):
):
return Membership.JOIN, None
raise AuthError(
403,
"User %s not in room %s, and room previews are disabled"
% (user_id, room_id),
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@defer.inlineCallbacks

View File

@@ -66,7 +66,6 @@ class Codes(object):
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
BAD_ALIAS = "M_BAD_ALIAS"
class CodeMessageException(RuntimeError):

View File

@@ -57,7 +57,7 @@ class RoomVersion(object):
state_res = attr.ib() # int; one of the StateResolutionVersions
enforce_key_validity = attr.ib() # bool
# bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
# bool: before MSC2260, anyone was allowed to send an aliases event
special_case_aliases_auth = attr.ib(type=bool, default=False)
@@ -102,13 +102,12 @@ class RoomVersions(object):
enforce_key_validity=True,
special_case_aliases_auth=True,
)
MSC2432_DEV = RoomVersion(
"org.matrix.msc2432",
MSC2260_DEV = RoomVersion(
"org.matrix.msc2260",
RoomDisposition.UNSTABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
)
@@ -120,6 +119,6 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V3,
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.MSC2432_DEV,
RoomVersions.MSC2260_DEV,
)
} # type: Dict[str, RoomVersion]

View File

@@ -141,7 +141,7 @@ def start_reactor(
def quit_with_error(error_string):
message_lines = error_string.split("\n")
line_length = max(len(l) for l in message_lines if len(l) < 80) + 2
line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
sys.stderr.write("*" * line_length + "\n")
for line in message_lines:
sys.stderr.write(" %s\n" % (line.rstrip(),))
@@ -276,19 +276,9 @@ def start(hs, listeners=None):
# It is now safe to start your Synapse.
hs.start_listening(listeners)
hs.get_datastore().db.start_profiling()
hs.get_pusherpool().start()
setup_sentry(hs)
setup_sdnotify(hs)
# We now freeze all allocated objects in the hopes that (almost)
# everything currently allocated are things that will be used for the
# rest of time. Doing so means less work each GC (hopefully).
#
# This only works on Python 3.7
if sys.version_info >= (3, 7):
gc.collect()
gc.freeze()
except Exception:
traceback.print_exc(file=sys.stderr)
reactor = hs.get_reactor()

View File

@@ -13,11 +13,161 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.appservice")
class AppserviceSlaveStore(
DirectoryStore,
SlavedEventStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
):
pass
class AppserviceServer(HomeServer):
DATASTORE_CLASS = AppserviceSlaveStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse appservice now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ASReplicationHandler(self)
class ASReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(ASReplicationHandler, self).__init__(hs.get_datastore())
self.appservice_handler = hs.get_application_service_handler()
async def on_rdata(self, stream_name, token, rows):
await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
if stream_name == "events":
max_stream_id = self.store.get_room_max_stream_ordering()
run_in_background(self._notify_app_services, max_stream_id)
@defer.inlineCallbacks
def _notify_app_services(self, room_stream_id):
try:
yield self.appservice_handler.notify_interested_services(room_stream_id)
except Exception:
logger.exception("Error notifying application services of event")
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse appservice", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.appservice"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``notify_appservices: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the appservice handler to start, since it will be disabled in the main config
config.notify_appservices = True
ps = AppserviceServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ps, config, use_worker_options=True)
ps.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ps, config.worker_listeners
)
_base.start_worker_reactor("synapse-appservice", config)
if __name__ == "__main__":
with LoggingContext("main"):

View File

@@ -13,11 +13,195 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.room import (
JoinedRoomMemberListRestServlet,
PublicRoomListRestServlet,
RoomEventContextServlet,
RoomMemberListRestServlet,
RoomMessageListRestServlet,
RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import groups
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedGroupServerStore,
SlavedAccountDataStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedTransactionStore,
SlavedProfileStore,
SlavedClientIpStore,
MonthlyActiveUsersWorkerStore,
BaseSlavedStore,
):
pass
class ClientReaderServer(HomeServer):
DATASTORE_CLASS = ClientReaderSlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
PublicRoomListRestServlet(self).register(resource)
RoomMemberListRestServlet(self).register(resource)
JoinedRoomMemberListRestServlet(self).register(resource)
RoomStateRestServlet(self).register(resource)
RoomEventContextServlet(self).register(resource)
RoomMessageListRestServlet(self).register(resource)
RegisterRestServlet(self).register(resource)
LoginRestServlet(self).register(resource)
ThreepidRestServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
VersionsRestServlet(self).register(resource)
groups.register_servlets(self, resource)
resources.update({"/_matrix/client": resource})
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse client reader now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ReplicationClientHandler(self.get_datastore())
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse client reader", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.client_reader"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = ClientReaderServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-client-reader", config)
if __name__ == "__main__":
with LoggingContext("main"):

View File

@@ -13,11 +13,191 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.v1.room import (
JoinRoomAliasServlet,
RoomMembershipRestServlet,
RoomSendEventRestServlet,
RoomStateEventRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.event_creator")
class EventCreatorSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
UserDirectoryStore,
DirectoryStore,
SlavedTransactionStore,
SlavedProfileStore,
SlavedAccountDataStore,
SlavedPusherStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedDeviceStore,
SlavedClientIpStore,
SlavedApplicationServiceStore,
SlavedEventStore,
SlavedRegistrationStore,
RoomStore,
MonthlyActiveUsersWorkerStore,
BaseSlavedStore,
):
pass
class EventCreatorServer(HomeServer):
DATASTORE_CLASS = EventCreatorSlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
RoomSendEventRestServlet(self).register(resource)
RoomMembershipRestServlet(self).register(resource)
RoomStateEventRestServlet(self).register(resource)
JoinRoomAliasServlet(self).register(resource)
ProfileAvatarURLRestServlet(self).register(resource)
ProfileDisplaynameRestServlet(self).register(resource)
ProfileRestServlet(self).register(resource)
resources.update(
{
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
"/_matrix/client/v2_alpha": resource,
"/_matrix/client/api/v1": resource,
}
)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse event creator now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ReplicationClientHandler(self.get_datastore())
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse event creator", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.event_creator"
assert config.worker_replication_http_port is not None
# This should only be done on the user directory worker or the master
config.update_user_directory = False
events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = EventCreatorServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-event-creator", config)
if __name__ == "__main__":
with LoggingContext("main"):

View File

@@ -13,11 +13,177 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.federation_reader")
class FederationReaderSlavedStore(
SlavedAccountDataStore,
SlavedProfileStore,
SlavedApplicationServiceStore,
SlavedPusherStore,
SlavedPushRuleStore,
SlavedReceiptsStore,
SlavedEventStore,
SlavedKeyStore,
SlavedRegistrationStore,
SlavedGroupServerStore,
SlavedDeviceStore,
RoomStore,
DirectoryStore,
SlavedTransactionStore,
MonthlyActiveUsersWorkerStore,
BaseSlavedStore,
):
pass
class FederationReaderServer(HomeServer):
DATASTORE_CLASS = FederationReaderSlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "federation":
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
if name == "openid" and "federation" not in res["names"]:
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
resources.update(
{
FEDERATION_PREFIX: TransportLayerServer(
self, servlet_groups=["openid"]
)
}
)
if name in ["keys", "federation"]:
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
reactor=self.get_reactor(),
)
logger.info("Synapse federation reader now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ReplicationClientHandler(self.get_datastore())
def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse federation reader", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_reader"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = FederationReaderServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-federation-reader", config)
if __name__ == "__main__":
with LoggingContext("main"):

View File

@@ -13,11 +13,308 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import (
DeviceListsStream,
ReceiptsStream,
ToDeviceStream,
)
from synapse.server import HomeServer
from synapse.storage.database import Database
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.federation_sender")
class FederationSenderSlaveStore(
SlavedDeviceInboxStore,
SlavedTransactionStore,
SlavedReceiptsStore,
SlavedEventStore,
SlavedRegistrationStore,
SlavedDeviceStore,
SlavedPresenceStore,
):
def __init__(self, database: Database, db_conn, hs):
super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs)
# We pull out the current federation stream position now so that we
# always have a known value for the federation position in memory so
# that we don't have to bounce via a deferred once when we start the
# replication streams.
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
def _get_federation_out_pos(self, db_conn):
sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
sql = self.database_engine.convert_param_style(sql)
txn = db_conn.cursor()
txn.execute(sql, ("federation",))
rows = txn.fetchall()
txn.close()
return rows[0][0] if rows else -1
class FederationSenderServer(HomeServer):
DATASTORE_CLASS = FederationSenderSlaveStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse federation_sender now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return FederationSenderReplicationHandler(self)
class FederationSenderReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
self.send_handler = FederationSenderHandler(hs, self)
async def on_rdata(self, stream_name, token, rows):
await super(FederationSenderReplicationHandler, self).on_rdata(
stream_name, token, rows
)
self.send_handler.process_replication_rows(stream_name, token, rows)
def get_streams_to_replicate(self):
args = super(
FederationSenderReplicationHandler, self
).get_streams_to_replicate()
args.update(self.send_handler.stream_positions())
return args
def on_remote_server_up(self, server: str):
"""Called when get a new REMOTE_SERVER_UP command."""
# Let's wake up the transaction queue for the server in case we have
# pending stuff to send to it.
self.send_handler.wake_destination(server)
def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse federation sender", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_sender"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``send_federation: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the federation sender to start, since it will be disabled in the main config
config.send_federation = True
ss = FederationSenderServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-federation-sender", config)
class FederationSenderHandler(object):
"""Processes the replication stream and forwards the appropriate entries
to the federation sender.
"""
def __init__(self, hs: FederationSenderServer, replication_client):
self.store = hs.get_datastore()
self._is_mine_id = hs.is_mine_id
self.federation_sender = hs.get_federation_sender()
self.replication_client = replication_client
self.federation_position = self.store.federation_out_pos_startup
self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
self._last_ack = self.federation_position
self._room_serials = {}
self._room_typing = {}
def on_start(self):
# There may be some events that are persisted but haven't been sent,
# so send them now.
self.federation_sender.notify_new_events(
self.store.get_room_max_stream_ordering()
)
def wake_destination(self, server: str):
self.federation_sender.wake_destination(server)
def stream_positions(self):
return {"federation": self.federation_position}
def process_replication_rows(self, stream_name, token, rows):
# The federation stream contains things that we want to send out, e.g.
# presence, typing, etc.
if stream_name == "federation":
send_queue.process_rows_for_federation(self.federation_sender, rows)
run_in_background(self.update_token, token)
# We also need to poke the federation sender when new events happen
elif stream_name == "events":
self.federation_sender.notify_new_events(token)
# ... and when new receipts happen
elif stream_name == ReceiptsStream.NAME:
run_as_background_process(
"process_receipts_for_federation", self._on_new_receipts, rows
)
# ... as well as device updates and messages
elif stream_name == DeviceListsStream.NAME:
hosts = set(row.destination for row in rows)
for host in hosts:
self.federation_sender.send_device_messages(host)
elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local
# clients and remote servers, so we ignore entities that start with
# '@' (since they'll be local users rather than destinations).
hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
for host in hosts:
self.federation_sender.send_device_messages(host)
@defer.inlineCallbacks
def _on_new_receipts(self, rows):
"""
Args:
rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
new receipts to be processed
"""
for receipt in rows:
# we only want to send on receipts for our own users
if not self._is_mine_id(receipt.user_id):
continue
receipt_info = ReadReceipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
receipt.data,
)
yield self.federation_sender.send_read_receipt(receipt_info)
@defer.inlineCallbacks
def update_token(self, token):
try:
self.federation_position = token
# We linearize here to ensure we don't have races updating the token
with (yield self._fed_position_linearizer.queue(None)):
if self._last_ack < self.federation_position:
yield self.store.update_federation_out_pos(
"federation", self.federation_position
)
# We ACK this token over replication so that the master can drop
# its in memory queues
self.replication_client.send_federation_ack(
self.federation_position
)
self._last_ack = self.federation_position
except Exception:
logger.exception("Error updating federation stream position")
if __name__ == "__main__":
with LoggingContext("main"):

View File

@@ -13,11 +13,241 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.frontend_proxy")
class PresenceStatusStubServlet(RestServlet):
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
def __init__(self, hs):
super(PresenceStatusStubServlet, self).__init__()
self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
self.main_uri = hs.config.worker_main_http_uri
@defer.inlineCallbacks
def on_GET(self, request, user_id):
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
headers = {"Authorization": auth_headers}
try:
result = yield self.http_client.get_json(
self.main_uri + request.uri.decode("ascii"), headers=headers
)
except HttpResponseException as e:
raise e.to_synapse_error()
return 200, result
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
yield self.auth.get_user_by_req(request)
return 200, {}
class KeyUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super(KeyUploadServlet, self).__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.http_client = hs.get_simple_http_client()
self.main_uri = hs.config.worker_main_http_uri
@defer.inlineCallbacks
def on_POST(self, request, device_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
if device_id is not None:
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
requester.device_id,
device_id,
)
else:
device_id = requester.device_id
if device_id is None:
raise SynapseError(
400, "To upload keys, you must pass device_id when authenticating"
)
if body:
# They're actually trying to upload something, proxy to main synapse.
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
headers = {"Authorization": auth_headers}
result = yield self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
return 200, result
else:
# Just interested in counts.
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
return 200, {"one_time_key_counts": result}
class FrontendProxySlavedStore(
SlavedDeviceStore,
SlavedClientIpStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
BaseSlavedStore,
):
pass
class FrontendProxyServer(HomeServer):
DATASTORE_CLASS = FrontendProxySlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
KeyUploadServlet(self).register(resource)
# If presence is disabled, use the stub servlet that does
# not allow sending presence
if not self.config.use_presence:
PresenceStatusStubServlet(self).register(resource)
resources.update(
{
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
"/_matrix/client/v2_alpha": resource,
"/_matrix/client/api/v1": resource,
}
)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
reactor=self.get_reactor(),
)
logger.info("Synapse client reader now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ReplicationClientHandler(self.get_datastore())
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse frontend proxy", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.frontend_proxy"
assert config.worker_main_http_uri is not None
events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = FrontendProxyServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-frontend-proxy", config)
if __name__ == "__main__":
with LoggingContext("main"):

@@ -1,923 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
import synapse.events
from synapse.api.constants import EventTypes
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.federation.transport.server import TransportLayerServer
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import (
DeviceListsStream,
ReceiptsStream,
ToDeviceStream,
)
from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.room import (
JoinedRoomMemberListRestServlet,
JoinRoomAliasServlet,
PublicRoomListRestServlet,
RoomEventContextServlet,
RoomInitialSyncRestServlet,
RoomMemberListRestServlet,
RoomMembershipRestServlet,
RoomMessageListRestServlet,
RoomSendEventRestServlet,
RoomStateEventRestServlet,
RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import groups, sync, user_directory
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
from synapse.storage.data_stores.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.generic_worker")
class PresenceStatusStubServlet(RestServlet):
"""If presence is disabled this servlet can be used to stub out setting
presence status, while proxying the getters to the master instance.
"""
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
def __init__(self, hs):
super(PresenceStatusStubServlet, self).__init__()
self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
self.main_uri = hs.config.worker_main_http_uri
async def on_GET(self, request, user_id):
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
headers = {"Authorization": auth_headers}
try:
result = await self.http_client.get_json(
self.main_uri + request.uri.decode("ascii"), headers=headers
)
except HttpResponseException as e:
raise e.to_synapse_error()
return 200, result
async def on_PUT(self, request, user_id):
await self.auth.get_user_by_req(request)
return 200, {}
class KeyUploadServlet(RestServlet):
"""An implementation of the `KeyUploadServlet` that responds to read only
requests, but otherwise proxies through to the master instance.
"""
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super(KeyUploadServlet, self).__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.http_client = hs.get_simple_http_client()
self.main_uri = hs.config.worker_main_http_uri
async def on_POST(self, request, device_id):
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
if device_id is not None:
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
requester.device_id,
device_id,
)
else:
device_id = requester.device_id
if device_id is None:
raise SynapseError(
400, "To upload keys, you must pass device_id when authenticating"
)
if body:
# They're actually trying to upload something, proxy to main synapse.
# Pass through the auth headers, if any, in case the access token
# is there.
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
headers = {"Authorization": auth_headers}
result = await self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
return 200, result
else:
# Just interested in counts.
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
return 200, {"one_time_key_counts": result}
UPDATE_SYNCING_USERS_MS = 10 * 1000
class GenericWorkerPresence(object):
def __init__(self, hs):
self.hs = hs
self.is_mine_id = hs.is_mine_id
self.http_client = hs.get_simple_http_client()
self.store = hs.get_datastore()
self.user_to_num_current_syncs = {}
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
active_presence = self.store.take_presence_startup_info()
self.user_to_current_state = {state.user_id: state for state in active_presence}
# user_id -> last_sync_ms. Lists the users that have stopped syncing
# but we haven't notified the master of that yet
self.users_going_offline = {}
self._send_stop_syncing_loop = self.clock.looping_call(
self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
)
self.process_id = random_string(16)
logger.info("Presence process_id is %r", self.process_id)
def send_user_sync(self, user_id, is_syncing, last_sync_ms):
if self.hs.config.use_presence:
self.hs.get_tcp_replication().send_user_sync(
user_id, is_syncing, last_sync_ms
)
def mark_as_coming_online(self, user_id):
"""A user has started syncing. Send a UserSync to the master, unless they
had recently stopped syncing.
Args:
user_id (str)
"""
going_offline = self.users_going_offline.pop(user_id, None)
if not going_offline:
# Safe to skip because we haven't yet told the master they were offline
self.send_user_sync(user_id, True, self.clock.time_msec())
def mark_as_going_offline(self, user_id):
"""A user has stopped syncing. We wait before notifying the master as
its likely they'll come back soon. This allows us to avoid sending
a stopped syncing immediately followed by a started syncing notification
to the master
Args:
user_id (str)
"""
self.users_going_offline[user_id] = self.clock.time_msec()
def send_stop_syncing(self):
"""Check if there are any users who have stopped syncing a while ago
and haven't come back yet. If there are, poke the master about them.
"""
now = self.clock.time_msec()
for user_id, last_sync_ms in list(self.users_going_offline.items()):
if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
self.users_going_offline.pop(user_id, None)
self.send_user_sync(user_id, False, last_sync_ms)
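# A rough timeline of the debounce implemented above, assuming
# UPDATE_SYNCING_USERS_MS = 10_000:
#   t=0       user stops syncing -> users_going_offline[user_id] = 0
#   t=4_000   user syncs again   -> entry popped; no UserSync ever sent
#   t=10_000+ (had they stayed away) send_stop_syncing fires and calls
#             send_user_sync(user_id, False, 0), telling the master the
#             user went offline at the time they last synced.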
def set_state(self, user, state, ignore_status_msg=False):
# TODO: How's this supposed to work?
return defer.succeed(None)
get_states = __func__(PresenceHandler.get_states)
get_state = __func__(PresenceHandler.get_state)
current_state_for_users = __func__(PresenceHandler.current_state_for_users)
def user_syncing(self, user_id, affect_presence):
if affect_presence:
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
# If we went from no in-flight syncs to some, notify replication
if self.user_to_num_current_syncs[user_id] == 1:
self.mark_as_coming_online(user_id)
def _end():
# We check that the user_id is in user_to_num_current_syncs because
# user_to_num_current_syncs may have been cleared if we are
# shutting down.
if affect_presence and user_id in self.user_to_num_current_syncs:
self.user_to_num_current_syncs[user_id] -= 1
# If we went from one in-flight sync to none, notify replication
if self.user_to_num_current_syncs[user_id] == 0:
self.mark_as_going_offline(user_id)
@contextlib.contextmanager
def _user_syncing():
try:
yield
finally:
_end()
return defer.succeed(_user_syncing())
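# A minimal usage sketch (hypothetical caller; in practice the sync handler
# drives this): the deferred yields a context manager whose exit decrements
# the in-flight count and may mark the user as going offline.
#
#   cm = yield presence.user_syncing("@alice:example.com", True)
#   with cm:
#       ...  # serve the /sync request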
@defer.inlineCallbacks
def notify_from_replication(self, states, stream_id):
parties = yield get_interested_parties(self.store, states)
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
"presence_key",
stream_id,
rooms=room_ids_to_states.keys(),
users=users_to_states.keys(),
)
@defer.inlineCallbacks
def process_replication_rows(self, token, rows):
states = [
UserPresenceState(
row.user_id,
row.state,
row.last_active_ts,
row.last_federation_update_ts,
row.last_user_sync_ts,
row.status_msg,
row.currently_active,
)
for row in rows
]
for state in states:
self.user_to_current_state[state.user_id] = state
stream_id = token
yield self.notify_from_replication(states, stream_id)
def get_currently_syncing_users(self):
if self.hs.config.use_presence:
return [
user_id
for user_id, count in self.user_to_num_current_syncs.items()
if count > 0
]
else:
return set()
class GenericWorkerTyping(object):
def __init__(self, hs):
self._latest_room_serial = 0
self._reset()
def _reset(self):
"""
Reset the typing handler's data caches.
"""
# map room IDs to serial numbers
self._room_serials = {}
# map room IDs to sets of users currently typing
self._room_typing = {}
def stream_positions(self):
# We must update this typing token from the response of the previous
# sync. In particular, the stream id may "reset" back to zero/a low
# value which we *must* use for the next replication request.
return {"typing": self._latest_room_serial}
def process_replication_rows(self, token, rows):
if self._latest_room_serial > token:
# The master has gone backwards. To prevent inconsistent data, just
# clear everything.
self._reset()
# Set the latest serial token to whatever the server gave us.
self._latest_room_serial = token
for row in rows:
self._room_serials[row.room_id] = token
self._room_typing[row.room_id] = row.user_ids
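# A worked example of the reset above: if this worker last saw serial 42 and
# a restarted master now sends token 7, _reset() clears _room_serials and
# _room_typing before the lower token is recorded, so no room is left holding
# a serial from the old numbering.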
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
UserDirectoryStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedGroupServerStore,
SlavedAccountDataStore,
SlavedPusherStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedTransactionStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedPresenceStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
BaseSlavedStore,
):
def __init__(self, database, db_conn, hs):
super(GenericWorkerSlavedStore, self).__init__(database, db_conn, hs)
# We pull out the current federation stream position now so that we
# always have a known value for it in memory, and don't have to
# bounce via a deferred when we start the replication streams.
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
def _get_federation_out_pos(self, db_conn):
sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
sql = self.database_engine.convert_param_style(sql)
txn = db_conn.cursor()
txn.execute(sql, ("federation",))
rows = txn.fetchall()
txn.close()
return rows[0][0] if rows else -1
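# convert_param_style rewrites the "?" placeholder for the active database
# driver; a sketch of the result, assuming the two engines Synapse supports:
#   sqlite3:  ... WHERE type = ?
#   psycopg2: ... WHERE type = %s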
class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
PublicRoomListRestServlet(self).register(resource)
RoomMemberListRestServlet(self).register(resource)
JoinedRoomMemberListRestServlet(self).register(resource)
RoomStateRestServlet(self).register(resource)
RoomEventContextServlet(self).register(resource)
RoomMessageListRestServlet(self).register(resource)
RegisterRestServlet(self).register(resource)
LoginRestServlet(self).register(resource)
ThreepidRestServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
VersionsRestServlet(self).register(resource)
RoomSendEventRestServlet(self).register(resource)
RoomMembershipRestServlet(self).register(resource)
RoomStateEventRestServlet(self).register(resource)
JoinRoomAliasServlet(self).register(resource)
ProfileAvatarURLRestServlet(self).register(resource)
ProfileDisplaynameRestServlet(self).register(resource)
ProfileRestServlet(self).register(resource)
KeyUploadServlet(self).register(resource)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
InitialSyncRestServlet(self).register(resource)
RoomInitialSyncRestServlet(self).register(resource)
user_directory.register_servlets(self, resource)
# If presence is disabled, use the stub servlet that does
# not allow sending presence
if not self.config.use_presence:
PresenceStatusStubServlet(self).register(resource)
groups.register_servlets(self, resource)
resources.update({CLIENT_API_PREFIX: resource})
elif name == "federation":
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
elif name == "media":
if self.config.can_load_media_repo:
media_repo = self.get_media_repository_resource()
# We need to serve the admin servlets for media on the
# worker.
admin_resource = JsonResource(self, canonical_json=False)
register_servlets_for_media_repo(self, admin_resource)
resources.update(
{
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
"/_synapse/admin": admin_resource,
}
)
else:
logger.warning(
"A 'media' listener is configured but the media"
" repository is disabled. Ignoring."
)
if name == "openid" and "federation" not in res["names"]:
# Only load the openid resource separately if the federation resource
# is not specified, since the federation resource already includes the
# openid resource.
resources.update(
{
FEDERATION_PREFIX: TransportLayerServer(
self, servlet_groups=["openid"]
)
}
)
if name in ["keys", "federation"]:
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
reactor=self.get_reactor(),
)
logger.info("Synapse worker now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def remove_pusher(self, app_id, push_key, user_id):
self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
def build_tcp_replication(self):
return GenericWorkerReplicationHandler(self)
def build_presence_handler(self):
return GenericWorkerPresence(self)
def build_typing_handler(self):
return GenericWorkerTyping(self)
class GenericWorkerReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(GenericWorkerReplicationHandler, self).__init__(hs.get_datastore())
self.store = hs.get_datastore()
self.typing_handler = hs.get_typing_handler()
# NB this is a SynchrotronPresence, not a normal PresenceHandler
self.presence_handler = hs.get_presence_handler()
self.notifier = hs.get_notifier()
self.notify_pushers = hs.config.start_pushers
self.pusher_pool = hs.get_pusherpool()
if hs.config.send_federation:
self.send_handler = FederationSenderHandler(hs, self)
else:
self.send_handler = None
async def on_rdata(self, stream_name, token, rows):
await super(GenericWorkerReplicationHandler, self).on_rdata(
stream_name, token, rows
)
run_in_background(self.process_and_notify, stream_name, token, rows)
def get_streams_to_replicate(self):
args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate()
args.update(self.typing_handler.stream_positions())
if self.send_handler:
args.update(self.send_handler.stream_positions())
return args
def get_currently_syncing_users(self):
return self.presence_handler.get_currently_syncing_users()
async def process_and_notify(self, stream_name, token, rows):
try:
if self.send_handler:
self.send_handler.process_replication_rows(stream_name, token, rows)
if stream_name == "events":
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
for row in rows:
if row.type != EventsStreamEventRow.TypeId:
continue
assert isinstance(row, EventsStreamRow)
event = await self.store.get_event(
row.data.event_id, allow_rejected=True
)
if event.rejected_reason:
continue
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
max_token = self.store.get_room_max_stream_ordering()
self.notifier.on_new_room_event(
event, token, max_token, extra_users
)
await self.pusher_pool.on_new_notifications(token, token)
elif stream_name == "push_rules":
self.notifier.on_new_event(
"push_rules_key", token, users=[row.user_id for row in rows]
)
elif stream_name in ("account_data", "tag_account_data"):
self.notifier.on_new_event(
"account_data_key", token, users=[row.user_id for row in rows]
)
elif stream_name == "receipts":
self.notifier.on_new_event(
"receipt_key", token, rooms=[row.room_id for row in rows]
)
await self.pusher_pool.on_new_receipts(
token, token, {row.room_id for row in rows}
)
elif stream_name == "typing":
self.typing_handler.process_replication_rows(token, rows)
self.notifier.on_new_event(
"typing_key", token, rooms=[row.room_id for row in rows]
)
elif stream_name == "to_device":
entities = [row.entity for row in rows if row.entity.startswith("@")]
if entities:
self.notifier.on_new_event("to_device_key", token, users=entities)
elif stream_name == "device_lists":
all_room_ids = set()
for row in rows:
room_ids = await self.store.get_rooms_for_user(row.user_id)
all_room_ids.update(room_ids)
self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
elif stream_name == "presence":
await self.presence_handler.process_replication_rows(token, rows)
elif stream_name == "receipts":
self.notifier.on_new_event(
"groups_key", token, users=[row.user_id for row in rows]
)
elif stream_name == "pushers":
for row in rows:
if row.deleted:
self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
await self.start_pusher(row.user_id, row.app_id, row.pushkey)
except Exception:
logger.exception("Error processing replication")
def stop_pusher(self, user_id, app_id, pushkey):
if not self.notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
pusher = pushers_for_user.pop(key, None)
if pusher is None:
return
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
async def start_pusher(self, user_id, app_id, pushkey):
if not self.notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
def on_remote_server_up(self, server: str):
"""Called when get a new REMOTE_SERVER_UP command."""
# Let's wake up the transaction queue for the server in case we have
# pending stuff to send to it.
if self.send_handler:
self.send_handler.wake_destination(server)
class FederationSenderHandler(object):
"""Processes the replication stream and forwards the appropriate entries
to the federation sender.
"""
def __init__(self, hs: GenericWorkerServer, replication_client):
self.store = hs.get_datastore()
self._is_mine_id = hs.is_mine_id
self.federation_sender = hs.get_federation_sender()
self.replication_client = replication_client
self.federation_position = self.store.federation_out_pos_startup
self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
self._last_ack = self.federation_position
self._room_serials = {}
self._room_typing = {}
def on_start(self):
# There may be some events that are persisted but haven't been sent,
# so send them now.
self.federation_sender.notify_new_events(
self.store.get_room_max_stream_ordering()
)
def wake_destination(self, server: str):
self.federation_sender.wake_destination(server)
def stream_positions(self):
return {"federation": self.federation_position}
def process_replication_rows(self, stream_name, token, rows):
# The federation stream contains things that we want to send out, e.g.
# presence, typing, etc.
if stream_name == "federation":
send_queue.process_rows_for_federation(self.federation_sender, rows)
run_in_background(self.update_token, token)
# We also need to poke the federation sender when new events happen
elif stream_name == "events":
self.federation_sender.notify_new_events(token)
# ... and when new receipts happen
elif stream_name == ReceiptsStream.NAME:
run_as_background_process(
"process_receipts_for_federation", self._on_new_receipts, rows
)
# ... as well as device updates and messages
elif stream_name == DeviceListsStream.NAME:
hosts = {row.destination for row in rows}
for host in hosts:
self.federation_sender.send_device_messages(host)
elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local
# clients and remote servers, so we ignore entities that start with
# '@' (since they'll be local users rather than destinations).
hosts = {row.entity for row in rows if not row.entity.startswith("@")}
for host in hosts:
self.federation_sender.send_device_messages(host)
async def _on_new_receipts(self, rows):
"""
Args:
rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
new receipts to be processed
"""
for receipt in rows:
# we only want to send on receipts for our own users
if not self._is_mine_id(receipt.user_id):
continue
receipt_info = ReadReceipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
receipt.data,
)
await self.federation_sender.send_read_receipt(receipt_info)
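# For illustration, a receipt row for @alice reading event $ev would be
# forwarded roughly as (argument order per the constructor call above,
# values hypothetical):
#   ReadReceipt("!room:example.com", "m.read", "@alice:example.com",
#               ["$ev"], {"ts": 1580000000000})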
async def update_token(self, token):
try:
self.federation_position = token
# We linearize here to ensure we don't have races updating the token
with (await self._fed_position_linearizer.queue(None)):
if self._last_ack < self.federation_position:
await self.store.update_federation_out_pos(
"federation", self.federation_position
)
# We ACK this token over replication so that the master can drop
# its in-memory queues
self.replication_client.send_federation_ack(
self.federation_position
)
self._last_ack = self.federation_position
except Exception:
logger.exception("Error updating federation stream position")
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse worker", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
# For backwards compatibility, allow any of the old app names.
assert config.worker_app in (
"synapse.app.appservice",
"synapse.app.client_reader",
"synapse.app.event_creator",
"synapse.app.federation_reader",
"synapse.app.federation_sender",
"synapse.app.frontend_proxy",
"synapse.app.generic_worker",
"synapse.app.media_repository",
"synapse.app.pusher",
"synapse.app.synchrotron",
"synapse.app.user_dir",
)
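# A minimal worker config sketch that would satisfy the assertion above
# (file name and port are hypothetical; the listener keys mirror those read
# by _listen_http):
#
#   # worker1.yaml
#   worker_app: synapse.app.generic_worker
#   worker_listeners:
#     - type: http
#       port: 8083
#       bind_addresses: ['127.0.0.1']
#       resources:
#         - names: [client]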
if config.worker_app == "synapse.app.appservice":
if config.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``notify_appservices: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the appservices to start since they will be disabled in the main config
config.notify_appservices = True
if config.worker_app == "synapse.app.pusher":
if config.start_pushers:
sys.stderr.write(
"\nThe pushers must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``start_pushers: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the pushers to start since they will be disabled in the main config
config.start_pushers = True
if config.worker_app == "synapse.app.user_dir":
if config.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``update_user_directory: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the user directory updates to start since they will be disabled in the main config
config.update_user_directory = True
if config.worker_app == "synapse.app.federation_sender":
if config.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``send_federation: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the federation sender to start since it will be disabled in the main config
config.send_federation = True
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = GenericWorkerServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-generic-worker", config)
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])

@@ -298,11 +298,6 @@ class SynapseHomeServer(HomeServer):
# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
current_mau_by_service_gauge = Gauge(
"synapse_admin_mau_current_mau_by_service",
"Current MAU by service",
["app_service"],
)
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
"synapse_admin_mau:registered_reserved_users",
@@ -408,6 +403,7 @@ def setup(config_options):
_base.start(hs, config.listeners)
hs.get_pusherpool().start()
hs.get_datastore().db.updates.start_doing_background_updates()
except Exception:
# Print the exception and bail out.
@@ -589,20 +585,12 @@ def run(hs):
@defer.inlineCallbacks
def generate_monthly_active_users():
current_mau_count = 0
current_mau_count_by_service = {}
reserved_users = ()
store = hs.get_datastore()
if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
current_mau_count = yield store.get_monthly_active_count()
current_mau_count_by_service = (
yield store.get_monthly_active_count_by_service()
)
reserved_users = yield store.get_registered_reserved_users()
current_mau_gauge.set(float(current_mau_count))
for app_service, count in current_mau_count_by_service.items():
current_mau_by_service_gauge.labels(app_service).set(float(count))
registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
max_mau_gauge.set(float(hs.config.max_mau_value))
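# The gauges above are prometheus_client Gauges; the per-service loop sets
# one sample per label value, e.g. (service name hypothetical):
#   current_mau_by_service_gauge.labels("irc_bridge").set(42.0)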

@@ -13,11 +13,162 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.server import HomeServer
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.media_repository")
class MediaRepositorySlavedStore(
RoomStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedClientIpStore,
SlavedTransactionStore,
BaseSlavedStore,
MediaRepositoryStore,
):
pass
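# Base-class order matters for these mix-in stores: Python's C3 method
# resolution order prefers classes further left, so a method defined on
# RoomStore wins over a same-named one defined further right. The order can
# be inspected with:
#   MediaRepositorySlavedStore.__mro__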
class MediaRepositoryServer(HomeServer):
DATASTORE_CLASS = MediaRepositorySlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "media":
media_repo = self.get_media_repository_resource()
# We need to serve the admin servlets for media on the
# worker.
admin_resource = JsonResource(self, canonical_json=False)
register_servlets_for_media_repo(self, admin_resource)
resources.update(
{
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
"/_synapse/admin": admin_resource,
}
)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse media repository now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return ReplicationClientHandler(self.get_datastore())
def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse media repository", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.media_repository"
if config.enable_media_repo:
_base.quit_with_error(
"enable_media_repo must be disabled in the main synapse process\n"
"before the media repo can be run in a separate worker.\n"
"Please add ``enable_media_repo: false`` to the main config\n"
)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = MediaRepositoryServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-media-repository", config)
if __name__ == "__main__":
with LoggingContext("main"):

@@ -13,12 +13,213 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.pusher")
class PusherSlaveStore(
SlavedEventStore,
SlavedPusherStore,
SlavedReceiptsStore,
SlavedAccountDataStore,
RoomStore,
):
update_pusher_last_stream_ordering_and_success = __func__(
DataStore.update_pusher_last_stream_ordering_and_success
)
update_pusher_failing_since = __func__(DataStore.update_pusher_failing_since)
update_pusher_last_stream_ordering = __func__(
DataStore.update_pusher_last_stream_ordering
)
get_throttle_params_by_room = __func__(DataStore.get_throttle_params_by_room)
set_throttle_params = __func__(DataStore.set_throttle_params)
get_time_of_last_push_action_before = __func__(
DataStore.get_time_of_last_push_action_before
)
get_profile_displayname = __func__(DataStore.get_profile_displayname)
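# The __func__ helper (as used above) returns the plain function underlying
# a method so it can be re-attached to this class; a toy equivalent of the
# pattern, assuming Python 3 semantics where functions rebind directly:
#   class A:
#       def f(self):
#           return "hi"
#   class B:
#       f = A.f
#   B().f()  # -> "hi"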
class PusherServer(HomeServer):
DATASTORE_CLASS = PusherSlaveStore
def remove_pusher(self, app_id, push_key, user_id):
self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse pusher now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return PusherReplicationHandler(self)
class PusherReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(PusherReplicationHandler, self).__init__(hs.get_datastore())
self.pusher_pool = hs.get_pusherpool()
async def on_rdata(self, stream_name, token, rows):
await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
run_in_background(self.poke_pushers, stream_name, token, rows)
@defer.inlineCallbacks
def poke_pushers(self, stream_name, token, rows):
try:
if stream_name == "pushers":
for row in rows:
if row.deleted:
yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
elif stream_name == "events":
yield self.pusher_pool.on_new_notifications(token, token)
elif stream_name == "receipts":
yield self.pusher_pool.on_new_receipts(
token, token, set(row.room_id for row in rows)
)
except Exception:
logger.exception("Error poking pushers")
def stop_pusher(self, user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)
pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
pusher = pushers_for_user.pop(key, None)
if pusher is None:
return
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
def start_pusher(self, user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse pusher", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.pusher"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.start_pushers:
sys.stderr.write(
"\nThe pushers must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``start_pushers: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the pushers to start since they will be disabled in the main config
config.start_pushers = True
ps = PusherServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ps, config, use_worker_options=True)
ps.setup()
def start():
_base.start(ps, config.worker_listeners)
ps.get_pusherpool().start()
reactor.addSystemEventTrigger("before", "startup", start)
_base.start_worker_reactor("synapse-pusher", config)
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])

@@ -13,11 +13,454 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from six import iteritems
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse.api.constants import EventTypes
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.synchrotron")
class SynchrotronSlavedStore(
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
SlavedPresenceStore,
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
MonthlyActiveUsersWorkerStore,
BaseSlavedStore,
):
pass
UPDATE_SYNCING_USERS_MS = 10 * 1000
class SynchrotronPresence(object):
def __init__(self, hs):
self.hs = hs
self.is_mine_id = hs.is_mine_id
self.http_client = hs.get_simple_http_client()
self.store = hs.get_datastore()
self.user_to_num_current_syncs = {}
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
active_presence = self.store.take_presence_startup_info()
self.user_to_current_state = {state.user_id: state for state in active_presence}
# user_id -> last_sync_ms. Lists the users that have stopped syncing
# but we haven't notified the master of that yet
self.users_going_offline = {}
self._send_stop_syncing_loop = self.clock.looping_call(
self.send_stop_syncing, 10 * 1000
)
self.process_id = random_string(16)
logger.info("Presence process_id is %r", self.process_id)
def send_user_sync(self, user_id, is_syncing, last_sync_ms):
if self.hs.config.use_presence:
self.hs.get_tcp_replication().send_user_sync(
user_id, is_syncing, last_sync_ms
)
def mark_as_coming_online(self, user_id):
"""A user has started syncing. Send a UserSync to the master, unless they
had recently stopped syncing.
Args:
user_id (str)
"""
going_offline = self.users_going_offline.pop(user_id, None)
if not going_offline:
# Safe to skip because we haven't yet told the master they were offline
self.send_user_sync(user_id, True, self.clock.time_msec())
def mark_as_going_offline(self, user_id):
"""A user has stopped syncing. We wait before notifying the master as
its likely they'll come back soon. This allows us to avoid sending
a stopped syncing immediately followed by a started syncing notification
to the master
Args:
user_id (str)
"""
self.users_going_offline[user_id] = self.clock.time_msec()
def send_stop_syncing(self):
"""Check if there are any users who have stopped syncing a while ago
and haven't come back yet. If there are, poke the master about them.
"""
now = self.clock.time_msec()
for user_id, last_sync_ms in list(self.users_going_offline.items()):
if now - last_sync_ms > 10 * 1000:
self.users_going_offline.pop(user_id, None)
self.send_user_sync(user_id, False, last_sync_ms)
def set_state(self, user, state, ignore_status_msg=False):
# TODO: How's this supposed to work?
return defer.succeed(None)
get_states = __func__(PresenceHandler.get_states)
get_state = __func__(PresenceHandler.get_state)
current_state_for_users = __func__(PresenceHandler.current_state_for_users)
def user_syncing(self, user_id, affect_presence):
if affect_presence:
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
# If we went from no in-flight syncs to some, notify replication
if self.user_to_num_current_syncs[user_id] == 1:
self.mark_as_coming_online(user_id)
def _end():
# We check that the user_id is in user_to_num_current_syncs because
# user_to_num_current_syncs may have been cleared if we are
# shutting down.
if affect_presence and user_id in self.user_to_num_current_syncs:
self.user_to_num_current_syncs[user_id] -= 1
# If we went from one in-flight sync to none, notify replication
if self.user_to_num_current_syncs[user_id] == 0:
self.mark_as_going_offline(user_id)
@contextlib.contextmanager
def _user_syncing():
try:
yield
finally:
_end()
return defer.succeed(_user_syncing())
@defer.inlineCallbacks
def notify_from_replication(self, states, stream_id):
parties = yield get_interested_parties(self.store, states)
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
"presence_key",
stream_id,
rooms=room_ids_to_states.keys(),
users=users_to_states.keys(),
)
@defer.inlineCallbacks
def process_replication_rows(self, token, rows):
states = [
UserPresenceState(
row.user_id,
row.state,
row.last_active_ts,
row.last_federation_update_ts,
row.last_user_sync_ts,
row.status_msg,
row.currently_active,
)
for row in rows
]
for state in states:
self.user_to_current_state[state.user_id] = state
stream_id = token
yield self.notify_from_replication(states, stream_id)
def get_currently_syncing_users(self):
if self.hs.config.use_presence:
return [
user_id
for user_id, count in iteritems(self.user_to_num_current_syncs)
if count > 0
]
else:
return set()
class SynchrotronTyping(object):
def __init__(self, hs):
self._latest_room_serial = 0
self._reset()
def _reset(self):
"""
Reset the typing handler's data caches.
"""
# map room IDs to serial numbers
self._room_serials = {}
# map room IDs to sets of users currently typing
self._room_typing = {}
def stream_positions(self):
# We must update this typing token from the response of the previous
# sync. In particular, the stream id may "reset" back to zero/a low
# value which we *must* use for the next replication request.
return {"typing": self._latest_room_serial}
def process_replication_rows(self, token, rows):
if self._latest_room_serial > token:
# The master has gone backwards. To prevent inconsistent data, just
# clear everything.
self._reset()
# Set the latest serial token to whatever the server gave us.
self._latest_room_serial = token
for row in rows:
self._room_serials[row.room_id] = token
self._room_typing[row.room_id] = row.user_ids
class SynchrotronApplicationService(object):
def notify_interested_services(self, event):
pass
class SynchrotronServer(HomeServer):
DATASTORE_CLASS = SynchrotronSlavedStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
InitialSyncRestServlet(self).register(resource)
RoomInitialSyncRestServlet(self).register(resource)
resources.update(
{
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
"/_matrix/client/v2_alpha": resource,
"/_matrix/client/api/v1": resource,
}
)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse synchrotron now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return SyncReplicationHandler(self)
def build_presence_handler(self):
return SynchrotronPresence(self)
def build_typing_handler(self):
return SynchrotronTyping(self)
class SyncReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(SyncReplicationHandler, self).__init__(hs.get_datastore())
self.store = hs.get_datastore()
self.typing_handler = hs.get_typing_handler()
# NB this is a SynchrotronPresence, not a normal PresenceHandler
self.presence_handler = hs.get_presence_handler()
self.notifier = hs.get_notifier()
async def on_rdata(self, stream_name, token, rows):
await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
run_in_background(self.process_and_notify, stream_name, token, rows)
def get_streams_to_replicate(self):
args = super(SyncReplicationHandler, self).get_streams_to_replicate()
args.update(self.typing_handler.stream_positions())
return args
def get_currently_syncing_users(self):
return self.presence_handler.get_currently_syncing_users()
async def process_and_notify(self, stream_name, token, rows):
try:
if stream_name == "events":
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
for row in rows:
if row.type != EventsStreamEventRow.TypeId:
continue
assert isinstance(row, EventsStreamRow)
event = await self.store.get_event(
row.data.event_id, allow_rejected=True
)
if event.rejected_reason:
continue
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
max_token = self.store.get_room_max_stream_ordering()
self.notifier.on_new_room_event(
event, token, max_token, extra_users
)
elif stream_name == "push_rules":
self.notifier.on_new_event(
"push_rules_key", token, users=[row.user_id for row in rows]
)
elif stream_name in ("account_data", "tag_account_data"):
self.notifier.on_new_event(
"account_data_key", token, users=[row.user_id for row in rows]
)
elif stream_name == "receipts":
self.notifier.on_new_event(
"receipt_key", token, rooms=[row.room_id for row in rows]
)
elif stream_name == "typing":
self.typing_handler.process_replication_rows(token, rows)
self.notifier.on_new_event(
"typing_key", token, rooms=[row.room_id for row in rows]
)
elif stream_name == "to_device":
entities = [row.entity for row in rows if row.entity.startswith("@")]
if entities:
self.notifier.on_new_event("to_device_key", token, users=entities)
elif stream_name == "device_lists":
all_room_ids = set()
for row in rows:
room_ids = await self.store.get_rooms_for_user(row.user_id)
all_room_ids.update(room_ids)
self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
elif stream_name == "presence":
await self.presence_handler.process_replication_rows(token, rows)
elif stream_name == "receipts":
self.notifier.on_new_event(
"groups_key", token, users=[row.user_id for row in rows]
)
except Exception:
logger.exception("Error processing replication")
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse synchrotron", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.synchrotron"
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = SynchrotronServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
application_service_handler=SynchrotronApplicationService(),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-synchrotron", config)
if __name__ == "__main__":
with LoggingContext("main"):

@@ -14,10 +14,217 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from synapse.app.generic_worker import start
from synapse.util.logcontext import LoggingContext
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams.events import (
EventsStream,
EventsStreamCurrentStateRow,
)
from synapse.rest.client.v2_alpha import user_directory
from synapse.server import HomeServer
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.database import Database
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.user_dir")
class UserDirectorySlaveStore(
SlavedEventStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedClientIpStore,
UserDirectoryStore,
BaseSlavedStore,
):
def __init__(self, database: Database, db_conn, hs):
super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs)
events_max = self._stream_id_gen.get_current_token()
curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
db_conn,
"current_state_delta_stream",
entity_column="room_id",
stream_column="stream_id",
max_value=events_max, # As we share the stream id with events token
limit=1000,
)
self._curr_state_delta_stream_cache = StreamChangeCache(
"_curr_state_delta_stream_cache",
min_curr_state_delta_id,
prefilled_cache=curr_state_delta_prefill,
)
def stream_positions(self):
result = super(UserDirectorySlaveStore, self).stream_positions()
return result
def process_replication_rows(self, stream_name, token, rows):
if stream_name == EventsStream.NAME:
self._stream_id_gen.advance(token)
for row in rows:
if row.type != EventsStreamCurrentStateRow.TypeId:
continue
self._curr_state_delta_stream_cache.entity_has_changed(
row.data.room_id, token
)
return super(UserDirectorySlaveStore, self).process_replication_rows(
stream_name, token, rows
)
class UserDirectoryServer(HomeServer):
DATASTORE_CLASS = UserDirectorySlaveStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
user_directory.register_servlets(self, resource)
resources.update(
{
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
"/_matrix/client/v2_alpha": resource,
"/_matrix/client/api/v1": resource,
}
)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse user_dir now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return UserDirectoryReplicationHandler(self)
class UserDirectoryReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
self.user_directory = hs.get_user_directory_handler()
async def on_rdata(self, stream_name, token, rows):
await super(UserDirectoryReplicationHandler, self).on_rdata(
stream_name, token, rows
)
if stream_name == EventsStream.NAME:
run_in_background(self._notify_directory)
@defer.inlineCallbacks
def _notify_directory(self):
try:
yield self.user_directory.notify_new_event()
except Exception:
logger.exception("Error notifiying user directory of state update")
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse user directory", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.user_dir"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``update_user_directory: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the user directory updates to start, since they will be disabled in the main config
config.update_user_directory = True
ss = UserDirectoryServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-user-dir", config)
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])

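UserDirectorySlaveStore.__init__ above prefills a StreamChangeCache from current_state_delta_stream so that "has this room's state changed since token T?" can usually be answered without a database hit. A toy restatement of the cache's contract (a simplified reimplementation for illustration, not Synapse's actual class):

# Remember, per entity, the last stream position at which it changed, so
# "has X changed since token T?" can be answered from memory. Positions
# below the earliest known token are unknowable, so the cache must say
# "maybe changed" for them.
class ToyStreamChangeCache:
    def __init__(self, earliest_known_token, prefilled=None):
        self._earliest = earliest_known_token
        self._last_change = dict(prefilled or {})

    def entity_has_changed(self, entity, token):
        self._last_change[entity] = max(token, self._last_change.get(entity, 0))

    def has_entity_changed(self, entity, since_token):
        if since_token < self._earliest:
            return True  # below the cache's horizon: cannot prove otherwise
        return self._last_change.get(entity, self._earliest) > since_token

cache = ToyStreamChangeCache(100, prefilled={"!room:hs": 150})
assert cache.has_entity_changed("!room:hs", 120)
assert not cache.has_entity_changed("!room:hs", 200)
assert cache.has_entity_changed("!other:hs", 50)  # below horizon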
View File

@@ -53,18 +53,6 @@ Missing mandatory `server_name` config option.
"""
CONFIG_FILE_HEADER = """\
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
"""
def path_exists(file_path):
"""Check if a file exists
@@ -356,7 +344,7 @@ class RootConfig(object):
str: the yaml config file
"""
return CONFIG_FILE_HEADER + "\n\n".join(
return "\n\n".join(
dedent(conf)
for conf in self.invoke_all(
"generate_config_section",
@@ -586,8 +574,8 @@ class RootConfig(object):
if not path_exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "w") as config_file:
config_file.write("# vim:ft=yaml\n\n")
config_file.write(config_str)
config_file.write("\n\n# vim:ft=yaml")
config_dict = yaml.safe_load(config_str)
obj.generate_missing_files(config_dict, config_dir_path)
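The two sides of this hunk only disagree about where the fixed header comment is attached: at generation time, prepended to the joined sections, or at write time, alongside the vim modeline. A small sketch of the generate-sections-then-write flow, with hypothetical section generators rather than Synapse's real config classes:

from textwrap import dedent

HEADER = "# Configuration file (YAML). Indentation is significant.\n\n"

def generate_server_section():
    return dedent("""\
        server_name: example.com
        """)

def generate_listeners_section():
    return dedent("""\
        listeners:
          - port: 8008
            type: http
        """)

def write_config(path, sections):
    # Join the per-section strings, then attach the header at write time.
    body = "\n\n".join(fn() for fn in sections)
    with open(path, "w") as f:
        f.write(HEADER)
        f.write(body)

write_config("homeserver.example.yaml", [generate_server_section, generate_listeners_section])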

View File

@@ -24,7 +24,6 @@ from synapse.config import (
server,
server_notices_config,
spam_checker,
sso,
stats,
third_party_event_rules,
tls,
@@ -58,7 +57,6 @@ class RootConfig:
key: key.KeyConfig
saml2: saml2_config.SAML2Config
cas: cas.CasConfig
sso: sso.SSOConfig
jwt: jwt_config.JWTConfig
password: password.PasswordConfig
email: emailconfig.EmailConfig

View File

@@ -27,12 +27,6 @@ import pkg_resources
from ._base import Config, ConfigError
MISSING_PASSWORD_RESET_CONFIG_ERROR = """\
Password reset emails are enabled on this homeserver due to a partial
'email' block. However, the following required keys are missing:
%s
"""
class EmailConfig(Config):
section = "email"
@@ -148,18 +142,24 @@ class EmailConfig(Config):
bleach
if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
required = ["smtp_host", "smtp_port", "notif_from"]
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
for k in required:
if k not in email_config:
missing.append("email." + k)
# public_baseurl is required to build password reset and validation links that
# will be emailed to users
if config.get("public_baseurl") is None:
missing.append("public_baseurl")
if missing:
raise ConfigError(
MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),)
if len(missing) > 0:
raise RuntimeError(
"Password resets emails are configured to be sent from "
"this homeserver due to a partial 'email' block. "
"However, the following required keys are missing: %s"
% (", ".join(missing),)
)
# These email templates have placeholders in them, and thus must be
@@ -245,25 +245,32 @@ class EmailConfig(Config):
)
if self.email_enable_notifs:
required = [
"smtp_host",
"smtp_port",
"notif_from",
"notif_template_html",
"notif_template_text",
]
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
for k in required:
if k not in email_config:
missing.append(k)
if config.get("public_baseurl") is None:
missing.append("public_baseurl")
if missing:
raise ConfigError(
if len(missing) > 0:
raise RuntimeError(
"email.enable_notifs is True but required keys are missing: %s"
% (", ".join(missing),)
% (", ".join(["email." + k for k in missing]),)
)
self.email_notif_template_html = email_config.get(
"notif_template_html", "notif_mail.html"
)
self.email_notif_template_text = email_config.get(
"notif_template_text", "notif_mail.txt"
)
if config.get("public_baseurl") is None:
raise RuntimeError(
"email.enable_notifs is True but no public_baseurl is set"
)
self.email_notif_template_html = email_config["notif_template_html"]
self.email_notif_template_text = email_config["notif_template_text"]
for f in self.email_notif_template_text, self.email_notif_template_html:
p = os.path.join(self.email_template_dir, f)
@@ -316,6 +323,10 @@ class EmailConfig(Config):
#
#require_transport_security: true
# Enable sending emails for messages that the user has missed
#
#enable_notifs: false
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
@@ -333,11 +344,6 @@ class EmailConfig(Config):
#
#app_name: my_branded_matrix_server
# Uncomment the following to enable sending emails for messages that the user
# has missed. Disabled by default.
#
#enable_notifs: true
# Uncomment the following to disable automatic subscription to email
# notifications for new users. Enabled by default.
#
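Both variants of the validation above share one shape: collect every missing key first, then raise once with the complete list, so an admin gets a single actionable error instead of fixing keys one at a time. A hedged sketch of the pattern; ConfigError here is a local stand-in, not an import from Synapse:

class ConfigError(Exception):
    pass

def check_email_config(config):
    email = config.get("email", {})
    required = ["smtp_host", "smtp_port", "notif_from"]
    missing = ["email." + k for k in required if k not in email]
    # Links embedded in the emails need a public base URL to point back at.
    if config.get("public_baseurl") is None:
        missing.append("public_baseurl")
    if missing:
        raise ConfigError(
            "Email sending is enabled, but required keys are missing: %s"
            % (", ".join(missing),)
        )

check_email_config({
    "email": {"smtp_host": "mail", "smtp_port": 25, "notif_from": "noreply@example.com"},
    "public_baseurl": "https://example.com/",
})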

View File

@@ -38,7 +38,6 @@ from .saml2_config import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .sso import SSOConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
@@ -66,7 +65,6 @@ class HomeServerConfig(RootConfig):
KeyConfig,
SAML2Config,
CasConfig,
SSOConfig,
JWTConfig,
PasswordConfig,
EmailConfig,

View File

@@ -15,9 +15,6 @@
# limitations under the License.
import logging
import os
import pkg_resources
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module, load_python_module
@@ -163,14 +160,6 @@ class SAML2Config(Config):
saml2_config.get("saml_session_lifetime", "5m")
)
template_dir = saml2_config.get("template_dir")
if not template_dir:
template_dir = pkg_resources.resource_filename("synapse", "res/templates",)
self.saml2_error_html_content = self.read_file(
os.path.join(template_dir, "saml_error.html"), "saml2_config.saml_error",
)
def _default_saml_config_dict(
self, required_attributes: set, optional_attributes: set
):
@@ -336,25 +325,6 @@ class SAML2Config(Config):
# The default is 'uid'.
#
#grandfathered_mxid_source_attribute: upn
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page to display to users if something goes wrong during the
# authentication process: 'saml_error.html'.
#
# This template doesn't currently need any variable to render.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
""" % {
"config_dir_path": config_dir_path
}

View File

@@ -1066,12 +1066,12 @@ KNOWN_RESOURCES = (
def _check_resource_config(listeners):
resource_names = {
resource_names = set(
res_name
for listener in listeners
for res in listener.get("resources", [])
for res_name in res.get("names", [])
}
)
for resource in resource_names:
if resource not in KNOWN_RESOURCES:

View File

@@ -1,92 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
import pkg_resources
from ._base import Config
class SSOConfig(Config):
"""SSO Configuration
"""
section = "sso"
def read_config(self, config, **kwargs):
sso_config = config.get("sso") or {} # type: Dict[str, Any]
# Pick a template directory in order of:
# * The sso-specific template_dir
# * /path/to/synapse/install/res/templates
template_dir = sso_config.get("template_dir")
if not template_dir:
template_dir = pkg_resources.resource_filename("synapse", "res/templates",)
self.sso_redirect_confirm_template_dir = template_dir
self.sso_client_whitelist = sso_config.get("client_whitelist") or []
def generate_config_section(self, **kwargs):
return """\
# Additional settings to use with single-sign on systems such as SAML2 and CAS.
#
sso:
# A list of client URLs which are whitelisted so that the user does not
# have to confirm giving access to their account to the URL. Any client
# whose URL starts with an entry in the following list will not be subject
# to an additional confirmation step after the SSO login is completed.
#
# WARNING: An entry such as "https://my.client" is insecure, because it
# will also match "https://my.client.evil.site", exposing your users to
# phishing attacks from evil.site. To avoid this, include a slash after the
# hostname: "https://my.client/".
#
# By default, this list is empty.
#
#client_whitelist:
# - https://riot.im/develop
# - https://my.custom.client/
# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
# If you *do* uncomment it, you will need to make sure that all the templates
# below are in the directory.
#
# Synapse will look for the following templates in this directory:
#
# * HTML page for a confirmation step before redirecting back to the client
# with the login token: 'sso_redirect_confirm.html'.
#
# When rendering, this template is given three variables:
# * redirect_url: the URL the user is about to be redirected to. Needs
# manual escaping (see
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
# * display_url: the same as `redirect_url`, but with the query
# parameters stripped. The intention is to have a
# human-readable URL to show to users, not to use it as
# the final address to redirect to. Needs manual escaping
# (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
# * server_name: the homeserver's name.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
#template_dir: "res/templates"
"""

View File

@@ -32,17 +32,6 @@ from synapse.util import glob_to_regex
logger = logging.getLogger(__name__)
ACME_SUPPORT_ENABLED_WARN = """\
This server uses Synapse's built-in ACME support. Note that ACME v1 has been
deprecated by Let's Encrypt, and that Synapse doesn't currently support ACME v2,
which means that this feature will not work with Synapse installs set up after
November 2019, and that it may stop working in June 2020 for installs set up
before that date.
For more info and alternative solutions, see
https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
--------------------------------------------------------------------------------"""
class TlsConfig(Config):
section = "tls"
@@ -55,9 +44,6 @@ class TlsConfig(Config):
self.acme_enabled = acme_config.get("enabled", False)
if self.acme_enabled:
logger.warning(ACME_SUPPORT_ENABLED_WARN)
# hyperlink complains on py2 if this is not a Unicode
self.acme_url = six.text_type(
acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory")
@@ -123,8 +109,6 @@ class TlsConfig(Config):
fed_whitelist_entries = config.get(
"federation_certificate_verification_whitelist", []
)
if fed_whitelist_entries is None:
fed_whitelist_entries = []
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = [] # type: List[str]
@@ -260,7 +244,7 @@ class TlsConfig(Config):
crypto.FILETYPE_ASN1, self.tls_certificate
)
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
if sha256_fingerprint not in sha256_fingerprints:
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
@@ -376,11 +360,6 @@ class TlsConfig(Config):
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
# ACME v2. This means that this feature currently won't work with installs set
# up after November 2019. For more info, and alternative solutions, see
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
#
# Note that provisioning a certificate in this way requires port 80 to be
# routed to Synapse so that it can complete the http-01 ACME challenge.
# By default, if you enable ACME support, Synapse will attempt to listen on
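The fingerprint hunk above computes an unpadded-base64 SHA-256 digest of the DER certificate bytes and appends it only if it is not already listed. A standalone sketch using only the standard library (the certificate bytes are a stand-in, not real DER):

import hashlib
from base64 import b64encode

def encode_base64_unpadded(data):
    # Matrix uses unpadded base64, hence stripping the '=' padding.
    return b64encode(data).decode("ascii").rstrip("=")

def ensure_own_fingerprint(tls_fingerprints, der_cert_bytes):
    fp = encode_base64_unpadded(hashlib.sha256(der_cert_bytes).digest())
    known = {f["sha256"] for f in tls_fingerprints}
    if fp not in known:
        tls_fingerprints.append({"sha256": fp})
    return tls_fingerprints

print(ensure_own_fingerprint([], b"not-a-real-DER-certificate"))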

View File

@@ -75,7 +75,7 @@ class ServerContextFactory(ContextFactory):
@implementer(IPolicyForHTTPS)
class FederationPolicyForHTTPS(object):
class ClientTLSOptionsFactory(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.
@@ -103,15 +103,15 @@ class FederationPolicyForHTTPS(object):
# let us do).
minTLS = _TLS_VERSION_MAP[config.federation_client_minimum_tls_version]
_verify_ssl = CertificateOptions(
self._verify_ssl = CertificateOptions(
trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS
)
self._verify_ssl_context = _verify_ssl.getContext()
self._verify_ssl_context.set_info_callback(_context_info_cb)
self._verify_ssl_context = self._verify_ssl.getContext()
self._verify_ssl_context.set_info_callback(self._context_info_cb)
_no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS)
self._no_verify_ssl_context = _no_verify_ssl.getContext()
self._no_verify_ssl_context.set_info_callback(_context_info_cb)
self._no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS)
self._no_verify_ssl_context = self._no_verify_ssl.getContext()
self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
def get_options(self, host: bytes):
@@ -136,6 +136,23 @@ class FederationPolicyForHTTPS(object):
return SSLClientConnectionCreator(host, ssl_context, should_verify)
@staticmethod
def _context_info_cb(ssl_connection, where, ret):
"""The 'information callback' for our openssl context object."""
# we assume that the app_data on the connection object has been set to
# a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
tls_protocol = ssl_connection.get_app_data()
try:
# ... we further assume that SSLClientConnectionCreator has set the
# '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
tls_protocol._synapse_tls_verifier.verify_context_info_cb(
ssl_connection, where
)
except: # noqa: E722, taken from the twisted implementation
logger.exception("Error during info_callback")
f = Failure()
tls_protocol.failVerification(f)
def creatorForNetloc(self, hostname, port):
"""Implements the IPolicyForHTTPS interace so that this can be passed
directly to agents.
@@ -143,43 +160,6 @@ class FederationPolicyForHTTPS(object):
return self.get_options(hostname)
@implementer(IPolicyForHTTPS)
class RegularPolicyForHTTPS(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers, for other than federation.
Always uses the same OpenSSL context object, which uses the default OpenSSL CA
trust root.
"""
def __init__(self):
trust_root = platformTrust()
self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
self._ssl_context.set_info_callback(_context_info_cb)
def creatorForNetloc(self, hostname, port):
return SSLClientConnectionCreator(hostname, self._ssl_context, True)
def _context_info_cb(ssl_connection, where, ret):
"""The 'information callback' for our openssl context objects.
Note: Once this is set as the info callback on a Context object, the Context should
only be used with the SSLClientConnectionCreator.
"""
# we assume that the app_data on the connection object has been set to
# a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
tls_protocol = ssl_connection.get_app_data()
try:
# ... we further assume that SSLClientConnectionCreator has set the
# '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where)
except: # noqa: E722, taken from the twisted implementation
logger.exception("Error during info_callback")
f = Failure()
tls_protocol.failVerification(f)
@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):
"""Creates openssl connection objects for client connections.

View File

@@ -140,7 +140,7 @@ def compute_event_signature(
Returns:
a dictionary in the same format as an event's signatures field.
"""
redact_json = prune_event_dict(room_version, event_dict)
redact_json = prune_event_dict(event_dict)
redact_json.pop("age_ts", None)
redact_json.pop("unsigned", None)
if logger.isEnabledFor(logging.DEBUG):

View File

@@ -326,7 +326,9 @@ class Keyring(object):
verify_requests (list[VerifyJsonRequest]): list of verify requests
"""
remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
remaining_requests = set(
(rq for rq in verify_requests if not rq.key_ready.called)
)
@defer.inlineCallbacks
def do_iterations():
@@ -394,7 +396,7 @@ class Keyring(object):
results = yield fetcher.get_keys(missing_keys)
completed = []
completed = list()
for verify_request in remaining_requests:
server_name = verify_request.server_name

View File

@@ -137,7 +137,7 @@ def check(
raise AuthError(403, "This room has been marked as unfederatable.")
# 4. If type is m.room.aliases
if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth:
if event.type == EventTypes.Aliases:
# 4a. If event has no state_key, reject
if not event.is_state():
raise AuthError(403, "Alias event must be a state event")
@@ -152,8 +152,10 @@ def check(
)
# 4c. Otherwise, allow.
logger.debug("Allowing! %s", event)
return
# This is removed by https://github.com/matrix-org/matrix-doc/pull/2260
if room_version_obj.special_case_aliases_auth:
logger.debug("Allowing! %s", event)
return
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,16 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
from distutils.util import strtobool
from typing import Dict, Optional, Type
import six
from unpaddedbase64 import encode_base64
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.types import JsonDict
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -39,115 +37,34 @@ from synapse.util.frozenutils import freeze
USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
class DictProperty:
"""An object property which delegates to the `_dict` within its parent object."""
__slots__ = ["key"]
def __init__(self, key: str):
self.key = key
def __get__(self, instance, owner=None):
# if the property is accessed as a class property rather than an instance
# property, return the property itself rather than the value
if instance is None:
return self
try:
return instance._dict[self.key]
except KeyError as e1:
# We want this to look like a regular attribute error (mostly so that
# hasattr() works correctly), so we convert the KeyError into an
# AttributeError.
#
# To exclude the KeyError from the traceback, we explicitly
# 'raise from e1.__context__' (which is better than 'raise from None',
# because that would omit any *earlier* exceptions).
#
raise AttributeError(
"'%s' has no '%s' property" % (type(instance), self.key)
) from e1.__context__
def __set__(self, instance, v):
instance._dict[self.key] = v
def __delete__(self, instance):
try:
del instance._dict[self.key]
except KeyError as e1:
raise AttributeError(
"'%s' has no '%s' property" % (type(instance), self.key)
) from e1.__context__
class DefaultDictProperty(DictProperty):
"""An extension of DictProperty which provides a default if the property is
not present in the parent's _dict.
Note that this means that hasattr() on the property always returns True.
"""
__slots__ = ["default"]
def __init__(self, key, default):
super().__init__(key)
self.default = default
def __get__(self, instance, owner=None):
if instance is None:
return self
return instance._dict.get(self.key, self.default)
class _EventInternalMetadata(object):
__slots__ = ["_dict"]
def __init__(self, internal_metadata_dict):
self.__dict__ = dict(internal_metadata_dict)
def __init__(self, internal_metadata_dict: JsonDict):
# we have to copy the dict, because it turns out that the same dict is
# reused. TODO: fix that
self._dict = dict(internal_metadata_dict)
def get_dict(self):
return dict(self.__dict__)
outlier = DictProperty("outlier") # type: bool
out_of_band_membership = DictProperty("out_of_band_membership") # type: bool
send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str
recheck_redaction = DictProperty("recheck_redaction") # type: bool
soft_failed = DictProperty("soft_failed") # type: bool
proactively_send = DictProperty("proactively_send") # type: bool
redacted = DictProperty("redacted") # type: bool
txn_id = DictProperty("txn_id") # type: str
token_id = DictProperty("token_id") # type: str
stream_ordering = DictProperty("stream_ordering") # type: int
def is_outlier(self):
return getattr(self, "outlier", False)
# XXX: These are set by StreamWorkerStore._set_before_and_after.
# I'm pretty sure that these are never persisted to the database, so shouldn't
# be here
before = DictProperty("before") # type: str
after = DictProperty("after") # type: str
order = DictProperty("order") # type: int
def get_dict(self) -> JsonDict:
return dict(self._dict)
def is_outlier(self) -> bool:
return self._dict.get("outlier", False)
def is_out_of_band_membership(self) -> bool:
def is_out_of_band_membership(self):
"""Whether this is an out of band membership, like an invite or an invite
rejection. This is needed as those events are marked as outliers, but
they still need to be processed as if they're new events (e.g. updating
invite state in the database, relaying to clients, etc).
"""
return self._dict.get("out_of_band_membership", False)
return getattr(self, "out_of_band_membership", False)
def get_send_on_behalf_of(self) -> Optional[str]:
def get_send_on_behalf_of(self):
"""Whether this server should send the event on behalf of another server.
This is used by the federation "send_join" API to forward the initial join
event for a server in the room.
returns a str with the name of the server this event is sent on behalf of.
"""
return self._dict.get("send_on_behalf_of")
return getattr(self, "send_on_behalf_of", None)
def need_to_check_redaction(self) -> bool:
def need_to_check_redaction(self):
"""Whether the redaction event needs to be rechecked when fetching
from the database.
@@ -160,9 +77,9 @@ class _EventInternalMetadata(object):
Returns:
bool
"""
return self._dict.get("recheck_redaction", False)
return getattr(self, "recheck_redaction", False)
def is_soft_failed(self) -> bool:
def is_soft_failed(self):
"""Whether the event has been soft failed.
Soft failed events should be handled as usual, except:
@@ -174,7 +91,7 @@ class _EventInternalMetadata(object):
Returns:
bool
"""
return self._dict.get("soft_failed", False)
return getattr(self, "soft_failed", False)
def should_proactively_send(self):
"""Whether the event, if ours, should be sent to other clients and
@@ -186,7 +103,7 @@ class _EventInternalMetadata(object):
Returns:
bool
"""
return self._dict.get("proactively_send", True)
return getattr(self, "proactively_send", True)
def is_redacted(self):
"""Whether the event has been redacted.
@@ -197,53 +114,82 @@ class _EventInternalMetadata(object):
Returns:
bool
"""
return self._dict.get("redacted", False)
return getattr(self, "redacted", False)
class EventBase(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def format_version(self) -> int:
"""The EventFormatVersion implemented by this event"""
...
_SENTINEL = object()
def _event_dict_property(key, default=_SENTINEL):
"""Creates a new property for the given key that delegates access to
`self._event_dict`.
If a default is given, it is used when the key is missing from the
`_event_dict`; otherwise an AttributeError will be raised.
Note: If a default is given then `hasattr` will always return true.
"""
# We want to be able to use hasattr with the event dict properties.
# However, (on python3) hasattr expects AttributeError to be raised. Hence,
# we need to transform the KeyError into an AttributeError
def getter_raises(self):
try:
return self._event_dict[key]
except KeyError:
raise AttributeError(key)
def getter_default(self):
return self._event_dict.get(key, default)
def setter(self, v):
try:
self._event_dict[key] = v
except KeyError:
raise AttributeError(key)
def delete(self):
try:
del self._event_dict[key]
except KeyError:
raise AttributeError(key)
if default is _SENTINEL:
# No default given, so use the getter that raises
return property(getter_raises, setter, delete)
else:
return property(getter_default, setter, delete)
class EventBase(object):
def __init__(
self,
event_dict: JsonDict,
room_version: RoomVersion,
signatures: Dict[str, Dict[str, str]],
unsigned: JsonDict,
internal_metadata_dict: JsonDict,
rejected_reason: Optional[str],
event_dict,
signatures={},
unsigned={},
internal_metadata_dict={},
rejected_reason=None,
):
assert room_version.event_format == self.format_version
self.room_version = room_version
self.signatures = signatures
self.unsigned = unsigned
self.rejected_reason = rejected_reason
self._dict = event_dict
self._event_dict = event_dict
self.internal_metadata = _EventInternalMetadata(internal_metadata_dict)
auth_events = DictProperty("auth_events")
depth = DictProperty("depth")
content = DictProperty("content")
hashes = DictProperty("hashes")
origin = DictProperty("origin")
origin_server_ts = DictProperty("origin_server_ts")
prev_events = DictProperty("prev_events")
redacts = DefaultDictProperty("redacts", None)
room_id = DictProperty("room_id")
sender = DictProperty("sender")
state_key = DictProperty("state_key")
type = DictProperty("type")
user_id = DictProperty("sender")
@property
def event_id(self) -> str:
raise NotImplementedError()
auth_events = _event_dict_property("auth_events")
depth = _event_dict_property("depth")
content = _event_dict_property("content")
hashes = _event_dict_property("hashes")
origin = _event_dict_property("origin")
origin_server_ts = _event_dict_property("origin_server_ts")
prev_events = _event_dict_property("prev_events")
redacts = _event_dict_property("redacts", None)
room_id = _event_dict_property("room_id")
sender = _event_dict_property("sender")
user_id = _event_dict_property("sender")
@property
def membership(self):
@@ -253,13 +199,13 @@ class EventBase(metaclass=abc.ABCMeta):
return hasattr(self, "state_key") and self.state_key is not None
def get_dict(self) -> JsonDict:
d = dict(self._dict)
d = dict(self._event_dict)
d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)})
return d
def get(self, key, default=None):
return self._dict.get(key, default)
return self._event_dict.get(key, default)
def get_internal_metadata_dict(self):
return self.internal_metadata.get_dict()
@@ -281,16 +227,16 @@ class EventBase(metaclass=abc.ABCMeta):
raise AttributeError("Unrecognized attribute %s" % (instance,))
def __getitem__(self, field):
return self._dict[field]
return self._event_dict[field]
def __contains__(self, field):
return field in self._dict
return field in self._event_dict
def items(self):
return list(self._dict.items())
return list(self._event_dict.items())
def keys(self):
return six.iterkeys(self._dict)
return six.iterkeys(self._event_dict)
def prev_event_ids(self):
"""Returns the list of prev event IDs. The order matches the order
@@ -314,13 +260,7 @@ class EventBase(metaclass=abc.ABCMeta):
class FrozenEvent(EventBase):
format_version = EventFormatVersions.V1 # All events of this type are V1
def __init__(
self,
event_dict: JsonDict,
room_version: RoomVersion,
internal_metadata_dict: JsonDict = {},
rejected_reason: Optional[str] = None,
):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
event_dict = dict(event_dict)
# Signatures is a dict of dicts, and this is faster than doing a
@@ -341,21 +281,19 @@ class FrozenEvent(EventBase):
else:
frozen_dict = event_dict
self._event_id = event_dict["event_id"]
self.event_id = event_dict["event_id"]
self.type = event_dict["type"]
if "state_key" in event_dict:
self.state_key = event_dict["state_key"]
super().__init__(
super(FrozenEvent, self).__init__(
frozen_dict,
room_version=room_version,
signatures=signatures,
unsigned=unsigned,
internal_metadata_dict=internal_metadata_dict,
rejected_reason=rejected_reason,
)
@property
def event_id(self) -> str:
return self._event_id
def __str__(self):
return self.__repr__()
@@ -370,13 +308,7 @@ class FrozenEvent(EventBase):
class FrozenEventV2(EventBase):
format_version = EventFormatVersions.V2 # All events of this type are V2
def __init__(
self,
event_dict: JsonDict,
room_version: RoomVersion,
internal_metadata_dict: JsonDict = {},
rejected_reason: Optional[str] = None,
):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
event_dict = dict(event_dict)
# Signatures is a dict of dicts, and this is faster than doing a
@@ -400,10 +332,12 @@ class FrozenEventV2(EventBase):
frozen_dict = event_dict
self._event_id = None
self.type = event_dict["type"]
if "state_key" in event_dict:
self.state_key = event_dict["state_key"]
super().__init__(
super(FrozenEventV2, self).__init__(
frozen_dict,
room_version=room_version,
signatures=signatures,
unsigned=unsigned,
internal_metadata_dict=internal_metadata_dict,
@@ -470,7 +404,28 @@ class FrozenEventV3(FrozenEventV2):
return self._event_id
def _event_type_from_format_version(format_version: int) -> Type[EventBase]:
def room_version_to_event_format(room_version):
"""Converts a room version string to the event format
Args:
room_version (str)
Returns:
int
Raises:
UnsupportedRoomVersionError if the room version is unknown
"""
v = KNOWN_ROOM_VERSIONS.get(room_version)
if not v:
# this can happen if support is withdrawn for a room version
raise UnsupportedRoomVersionError()
return v.event_format
def event_type_from_format_version(format_version):
"""Returns the python type to use to construct an Event object for the
given event format version.
@@ -490,14 +445,3 @@ def _event_type_from_format_version(format_version: int) -> Type[EventBase]:
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))
def make_event_from_dict(
event_dict: JsonDict,
room_version: RoomVersion = RoomVersions.V1,
internal_metadata_dict: JsonDict = {},
rejected_reason: Optional[str] = None,
) -> EventBase:
"""Construct an EventBase from the given event dict"""
event_type = _event_type_from_format_version(room_version.event_format)
return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason)
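The DictProperty descriptor introduced in this hunk is small enough to restate standalone. The important detail is converting KeyError into AttributeError so that hasattr() keeps working; this is the hunk's own code, trimmed down for illustration:

class DictProperty:
    """Attribute access that delegates to the instance's `_dict`."""

    __slots__ = ["key"]

    def __init__(self, key):
        self.key = key

    def __get__(self, instance, owner=None):
        # Accessed on the class itself: return the descriptor.
        if instance is None:
            return self
        try:
            return instance._dict[self.key]
        except KeyError as e:
            # Surface as AttributeError so hasattr() behaves normally.
            raise AttributeError(self.key) from e.__context__

    def __set__(self, instance, value):
        instance._dict[self.key] = value

class Meta:
    outlier = DictProperty("outlier")

    def __init__(self, d):
        self._dict = dict(d)

m = Meta({"outlier": True})
assert m.outlier is True
assert not hasattr(Meta({}), "outlier")  # KeyError becomes AttributeError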

View File

@@ -28,7 +28,11 @@ from synapse.api.room_versions import (
RoomVersion,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.events import (
EventBase,
_EventInternalMetadata,
event_type_from_format_version,
)
from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -252,8 +256,8 @@ def create_local_event_from_event_dict(
event_dict.setdefault("signatures", {})
add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
return make_event_from_dict(
event_dict, room_version, internal_metadata_dict=internal_metadata_dict
return event_type_from_format_version(format_version)(
event_dict, internal_metadata_dict=internal_metadata_dict
)

View File

@@ -15,17 +15,12 @@
# limitations under the License.
import inspect
from typing import Dict
from synapse.spam_checker_api import SpamCheckerApi
MYPY = False
if MYPY:
import synapse.server
class SpamChecker(object):
def __init__(self, hs: "synapse.server.HomeServer"):
def __init__(self, hs):
self.spam_checker = None
module = None
@@ -45,7 +40,7 @@ class SpamChecker(object):
else:
self.spam_checker = module(config=config)
def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool:
def check_event_for_spam(self, event):
"""Checks if a given event is considered "spammy" by this server.
If the server considers an event spammy, then it will be rejected if
@@ -53,30 +48,26 @@ class SpamChecker(object):
users receive a blank event.
Args:
event: the event to be checked
event (synapse.events.EventBase): the event to be checked
Returns:
True if the event is spammy.
bool: True if the event is spammy.
"""
if self.spam_checker is None:
return False
return self.spam_checker.check_event_for_spam(event)
def user_may_invite(
self, inviter_userid: str, invitee_userid: str, room_id: str
) -> bool:
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
"""Checks if a given user may send an invite
If this method returns false, the invite will be rejected.
Args:
inviter_userid: The user ID of the sender of the invitation
invitee_userid: The user ID targeted in the invitation
room_id: The room ID
userid (string): The sender's user ID
Returns:
True if the user may send an invite, otherwise False
bool: True if the user may send an invite, otherwise False
"""
if self.spam_checker is None:
return True
@@ -85,78 +76,52 @@ class SpamChecker(object):
inviter_userid, invitee_userid, room_id
)
def user_may_create_room(self, userid: str) -> bool:
def user_may_create_room(self, userid):
"""Checks if a given user may create a room
If this method returns false, the creation request will be rejected.
Args:
userid: The ID of the user attempting to create a room
userid (string): The sender's user ID
Returns:
True if the user may create a room, otherwise False
bool: True if the user may create a room, otherwise False
"""
if self.spam_checker is None:
return True
return self.spam_checker.user_may_create_room(userid)
def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
def user_may_create_room_alias(self, userid, room_alias):
"""Checks if a given user may create a room alias
If this method returns false, the association request will be rejected.
Args:
userid: The ID of the user attempting to create a room alias
room_alias: The alias to be created
userid (string): The sender's user ID
room_alias (string): The alias to be created
Returns:
True if the user may create a room alias, otherwise False
bool: True if the user may create a room alias, otherwise False
"""
if self.spam_checker is None:
return True
return self.spam_checker.user_may_create_room_alias(userid, room_alias)
def user_may_publish_room(self, userid: str, room_id: str) -> bool:
def user_may_publish_room(self, userid, room_id):
"""Checks if a given user may publish a room to the directory
If this method returns false, the publish request will be rejected.
Args:
userid: The user ID attempting to publish the room
room_id: The ID of the room that would be published
userid (string): The sender's user ID
room_id (string): The ID of the room that would be published
Returns:
True if the user may publish the room, otherwise False
bool: True if the user may publish the room, otherwise False
"""
if self.spam_checker is None:
return True
return self.spam_checker.user_may_publish_room(userid, room_id)
def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool:
"""Checks if a user ID or display name are considered "spammy" by this server.
If the server considers a username spammy, then it will not be included in
user directory results.
Args:
user_profile: The user information to check; it contains the keys:
* user_id
* display_name
* avatar_url
Returns:
True if the user is spammy.
"""
if self.spam_checker is None:
return False
# For backwards compatibility, if the method does not exist on the spam checker, fall back to not interfering.
checker = getattr(self.spam_checker, "check_username_for_spam", None)
if not checker:
return False
# Make a copy of the user profile object to ensure the spam checker
# cannot modify it.
return checker(user_profile.copy())
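check_username_for_spam above shows the backwards-compatibility pattern for pluggable modules: optional methods are looked up with getattr and silently skipped when absent. A sketch with made-up LegacyChecker/NewChecker classes standing in for third-party modules:

class LegacyChecker:
    # An old module that predates the username check.
    def check_event_for_spam(self, event):
        return False

class NewChecker(LegacyChecker):
    # A newer module that also implements the optional method.
    def check_username_for_spam(self, user_profile):
        return "spam" in (user_profile.get("display_name") or "")

def username_is_spam(module, user_profile):
    checker = getattr(module, "check_username_for_spam", None)
    if not checker:
        return False  # old module: don't interfere
    return checker(user_profile.copy())  # copy so the module can't mutate it

assert not username_is_spam(LegacyChecker(), {"display_name": "spam lord"})
assert username_is_spam(NewChecker(), {"display_name": "spam lord"})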

View File

@@ -74,16 +74,15 @@ class ThirdPartyEventRules(object):
is_requester_admin (bool): If the requester is an admin
Returns:
defer.Deferred[bool]: Whether room creation is allowed or denied.
defer.Deferred
"""
if self.third_party_rules is None:
return True
return
ret = yield self.third_party_rules.on_create_room(
yield self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)
return ret
@defer.inlineCallbacks
def check_threepid_can_be_invited(self, medium, address, room_id):

View File

@@ -23,7 +23,6 @@ from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
@@ -36,20 +35,26 @@ from . import EventBase
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
def prune_event(event: EventBase) -> EventBase:
def prune_event(event):
""" Returns a pruned version of the given event, which removes all keys we
don't know about or think could potentially be dodgy.
This is used when we "redact" an event. We want to remove all fields that
the user has specified, but we do want to keep necessary information like
type, state_key etc.
Args:
event (FrozenEvent)
Returns:
FrozenEvent
"""
pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())
pruned_event_dict = prune_event_dict(event.get_dict())
from . import make_event_from_dict
from . import event_type_from_format_version
pruned_event = make_event_from_dict(
pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
pruned_event = event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
# Mark the event as redacted
@@ -58,12 +63,15 @@ def prune_event(event: EventBase) -> EventBase:
return pruned_event
def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
def prune_event_dict(event_dict):
"""Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects
Args:
event_dict (dict)
Returns:
A copy of the pruned event dict
dict: A copy of the pruned event dict
"""
allowed_keys = [
@@ -110,7 +118,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
"kick",
"redact",
)
elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
elif event_type == EventTypes.Aliases:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
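prune_event_dict above redacts by allow-listing: keep a fixed set of top-level keys, plus per-type content fields such as membership or history_visibility. A toy sketch of that shape (the key sets here are illustrative, not the full redaction algorithm from the spec):

def prune(event_dict):
    # Top-level keys that always survive redaction (abbreviated list).
    allowed_keys = {"type", "room_id", "sender", "state_key", "depth",
                    "origin_server_ts", "hashes", "signatures"}
    # Content fields preserved for particular event types (abbreviated).
    content_keys_by_type = {
        "m.room.member": {"membership"},
        "m.room.history_visibility": {"history_visibility"},
    }
    pruned = {k: v for k, v in event_dict.items() if k in allowed_keys}
    keep = content_keys_by_type.get(event_dict.get("type"), set())
    content = event_dict.get("content", {})
    pruned["content"] = {k: v for k, v in content.items() if k in keep}
    return pruned

evt = {"type": "m.room.member", "sender": "@a:hs", "state_key": "@a:hs",
       "content": {"membership": "join", "displayname": "secret"}}
assert prune(evt)["content"] == {"membership": "join"}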

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,32 +14,27 @@
# limitations under the License.
import logging
from collections import namedtuple
from typing import Iterable, List
import six
from twisted.internet import defer
from twisted.internet.defer import Deferred, DeferredList
from twisted.python.failure import Failure
from twisted.internet.defer import DeferredList
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.crypto.event_signing import check_event_content_hash
from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
LoggingContext,
PreserveLoggingContext,
make_deferred_yieldable,
preserve_fn,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.types import get_domain_from_id
from synapse.util import unwrapFirstError
logger = logging.getLogger(__name__)
@@ -55,23 +49,92 @@ class FederationBase(object):
self.store = hs.get_datastore()
self._clock = hs.get_clock()
def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred:
@defer.inlineCallbacks
def _check_sigs_and_hash_and_fetch(
self, origin, pdus, room_version, outlier=False, include_none=False
):
"""Takes a list of PDUs and checks the signatures and hashs of each
one. If a PDU fails its signature check then we check if we have it in
the database and if not then request if from the originating server of
that PDU.
If a PDU fails its content hash check then it is redacted.
The given list of PDUs is not modified; instead the function returns
a new list.
Args:
origin (str)
pdus (list)
room_version (str)
outlier (bool): Whether the events are outliers or not
include_none (bool): Whether to include None in the returned list
for events that have failed their checks
Returns:
Deferred: A list of PDUs that have valid signatures and hashes.
"""
deferreds = self._check_sigs_and_hashes(room_version, pdus)
@defer.inlineCallbacks
def handle_check_result(pdu, deferred):
try:
res = yield make_deferred_yieldable(deferred)
except SynapseError:
res = None
if not res:
# Check local db.
res = yield self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
if not res and pdu.origin != origin:
try:
res = yield self.get_pdu(
destinations=[pdu.origin],
event_id=pdu.event_id,
room_version=room_version,
outlier=outlier,
timeout=10000,
)
except SynapseError:
pass
if not res:
logger.warning(
"Failed to find copy of %s with valid signature", pdu.event_id
)
return res
handle = preserve_fn(handle_check_result)
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
valid_pdus = yield make_deferred_yieldable(
defer.gatherResults(deferreds2, consumeErrors=True)
).addErrback(unwrapFirstError)
if include_none:
return valid_pdus
else:
return [p for p in valid_pdus if p]
def _check_sigs_and_hash(self, room_version, pdu):
return make_deferred_yieldable(
self._check_sigs_and_hashes(room_version, [pdu])[0]
)
def _check_sigs_and_hashes(
self, room_version: str, pdus: List[EventBase]
) -> List[Deferred]:
def _check_sigs_and_hashes(self, room_version, pdus):
"""Checks that each of the received events is correctly signed by the
sending server.
Args:
room_version: The room version of the PDUs
pdus: the events to be checked
room_version (str): The room version of the PDUs
pdus (list[FrozenEvent]): the events to be checked
Returns:
For each input event, a deferred which:
list[Deferred]: for each input event, a deferred which:
* returns the original event if the checks pass
* returns a redacted version of the event (if the signature
matched but the hash did not)
@@ -82,7 +145,7 @@ class FederationBase(object):
ctx = LoggingContext.current_context()
def callback(_, pdu: EventBase):
def callback(_, pdu):
with PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
@@ -119,7 +182,7 @@ class FederationBase(object):
return pdu
def errback(failure: Failure, pdu: EventBase):
def errback(failure, pdu):
failure.trap(SynapseError)
with PreserveLoggingContext(ctx):
logger.warning(
@@ -145,18 +208,16 @@ class PduToCheckSig(
pass
def _check_sigs_on_pdus(
keyring: Keyring, room_version: str, pdus: Iterable[EventBase]
) -> List[Deferred]:
def _check_sigs_on_pdus(keyring, room_version, pdus):
"""Check that the given events are correctly signed
Args:
keyring: keyring object to do the checks
room_version: the room version of the PDUs
pdus: the events to be checked
keyring (synapse.crypto.Keyring): keyring object to do the checks
room_version (str): the room version of the PDUs
pdus (Collection[EventBase]): the events to be checked
Returns:
A Deferred for each event in pdus, which will either succeed if
List[Deferred]: a Deferred for each event in pdus, which will either succeed if
the signatures are valid, or fail (with a SynapseError) if not.
"""
@@ -261,7 +322,7 @@ def _check_sigs_on_pdus(
return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
def _flatten_deferred_list(deferreds):
"""Given a list of deferreds, either return the single deferred,
combine into a DeferredList, or return an already resolved deferred.
"""
@@ -273,7 +334,7 @@ def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
return defer.succeed(None)
def _is_invite_via_3pid(event: EventBase) -> bool:
def _is_invite_via_3pid(event):
return (
event.type == EventTypes.Member
and event.membership == Membership.INVITE
@@ -281,15 +342,16 @@ def _is_invite_via_3pid(event: EventBase) -> bool:
)
def event_from_pdu_json(
pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
) -> EventBase:
"""Construct an EventBase from an event json received over federation
def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
"""Construct a FrozenEvent from an event json received over federation
Args:
pdu_json: pdu as received over federation
room_version: The version of the room this event belongs to
outlier: True to mark this event as an outlier
pdu_json (object): pdu as received over federation
event_format_version (int): The event format version
outlier (bool): True to mark this event as an outlier
Returns:
FrozenEvent
Raises:
SynapseError: if the pdu is missing required fields or is otherwise
@@ -308,7 +370,8 @@ def event_from_pdu_json(
elif depth > MAX_DEPTH:
raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
event = make_event_from_dict(pdu_json, room_version)
event = event_type_from_format_version(event_format_version)(pdu_json)
event.internal_metadata.outlier = outlier
return event
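_check_sigs_and_hash_and_fetch above implements a three-step fallback per PDU: verify signatures and hashes, else look in the local database, else re-fetch from the event's origin, and drop the event if all three fail. A simplified synchronous sketch with hypothetical stand-in callables (the real code runs these checks as parallel Deferreds):

class Pdu:
    def __init__(self, event_id):
        self.event_id = event_id

def check_and_fetch(pdus, verify, get_local, fetch_from_origin, log):
    valid = []
    for pdu in pdus:
        res = None
        try:
            res = verify(pdu)  # signature + content-hash check
        except Exception:
            pass
        if res is None:
            res = get_local(pdu.event_id)  # maybe we already have a good copy
        if res is None:
            res = fetch_from_origin(pdu)  # last resort: ask the origin server
        if res is None:
            log("failed to find copy of %s with valid signature" % pdu.event_id)
            continue
        valid.append(res)
    return valid

good, bad = Pdu("$good"), Pdu("$bad")
ok = check_and_fetch(
    [good, bad],
    verify=lambda p: p if p is good else None,
    get_local=lambda event_id: None,
    fetch_from_origin=lambda p: None,
    log=print,
)
assert ok == [good]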

View File

@@ -17,23 +17,11 @@
import copy
import itertools
import logging
from typing import (
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
)
from typing import Dict, Iterable
from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.defer import Deferred
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
@@ -47,14 +35,12 @@ from synapse.api.errors import (
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
RoomVersions,
)
from synapse.events import EventBase, builder
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable, preserve_fn
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
@@ -66,8 +52,6 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t
PDU_RETRY_TIME_MS = 1 * 60 * 1000
T = TypeVar("T")
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
@@ -186,55 +170,56 @@ class FederationClient(FederationBase):
sent_queries_counter.labels("client_one_time_keys").inc()
return self.transport_layer.claim_client_keys(destination, content, timeout)
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
) -> Optional[List[EventBase]]:
"""Requests some more historic PDUs for the given room from the
@defer.inlineCallbacks
@log_function
def backfill(self, dest, room_id, limit, extremities):
"""Requests some more historic PDUs for the given context from the
given destination server.
Args:
dest (str): The remote homeserver to ask.
room_id (str): The room_id to backfill.
limit (int): The maximum number of events to return.
extremities (list): our current backwards extremities, to backfill from
limit (int): The maximum number of PDUs to return.
extremities (list): List of PDU id and origins of the first pdus
we have seen from the context
Returns:
Deferred: Results in the received PDUs.
"""
logger.debug("backfill extrem=%s", extremities)
# If there are no extremities then we've (probably) reached the start.
# If there are no extremities then we've (probably) reached the start.
if not extremities:
return None
return
transaction_data = await self.transport_layer.backfill(
transaction_data = yield self.transport_layer.backfill(
dest, room_id, extremities, limit
)
logger.debug("backfill transaction_data=%r", transaction_data)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
pdus = [
event_from_pdu_json(p, room_version, outlier=False)
event_from_pdu_json(p, format_ver, outlier=False)
for p in transaction_data["pdus"]
]
# FIXME: We should handle signature failures more gracefully.
pdus[:] = await make_deferred_yieldable(
pdus[:] = yield make_deferred_yieldable(
defer.gatherResults(
self._check_sigs_and_hashes(room_version.identifier, pdus),
consumeErrors=True,
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
).addErrback(unwrapFirstError)
)
return pdus
async def get_pdu(
self,
destinations: Iterable[str],
event_id: str,
room_version: RoomVersion,
outlier: bool = False,
timeout: Optional[int] = None,
) -> Optional[EventBase]:
@defer.inlineCallbacks
@log_function
def get_pdu(
self, destinations, event_id, room_version, outlier=False, timeout=None
):
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -242,17 +227,18 @@ class FederationClient(FederationBase):
one succeeds.
Args:
destinations: Which homeservers to query
event_id: event to fetch
room_version: version of the room
outlier: Indicates whether the PDU is an `outlier`, i.e. if
destinations (list): Which homeservers to query
event_id (str): event to fetch
room_version (str): version of the room
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
it's from an arbitrary point in the context as opposed to part
of the current block of PDUs. Defaults to `False`
timeout: How long to try (in ms) each destination for before
timeout (int): How long to try (in ms) each destination for before
moving to the next destination. None indicates no timeout.
Returns:
The requested PDU, or None if we were unable to find it.
Deferred: Results in the requested PDU, or None if we were unable to find
it.
"""
# TODO: Rate limit the number of times we try and get the same event.
@@ -263,6 +249,8 @@ class FederationClient(FederationBase):
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
format_ver = room_version_to_event_format(room_version)
signed_pdu = None
for destination in destinations:
now = self._clock.time_msec()
@@ -271,7 +259,7 @@ class FederationClient(FederationBase):
continue
try:
transaction_data = await self.transport_layer.get_event(
transaction_data = yield self.transport_layer.get_event(
destination, event_id, timeout=timeout
)
@@ -283,17 +271,15 @@ class FederationClient(FederationBase):
)
pdu_list = [
event_from_pdu_json(p, room_version, outlier=outlier)
event_from_pdu_json(p, format_ver, outlier=outlier)
for p in transaction_data["pdus"]
] # type: List[EventBase]
]
if pdu_list and pdu_list[0]:
pdu = pdu_list[0]
# Check signatures are correct.
signed_pdu = await self._check_sigs_and_hash(
room_version.identifier, pdu
)
signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
break
@@ -323,16 +309,15 @@ class FederationClient(FederationBase):
return signed_pdu
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
) -> Tuple[List[str], List[str]]:
@defer.inlineCallbacks
def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
"""Calls the /state_ids endpoint to fetch the state at a particular point
in the room, and the auth events for the given event
Returns:
a tuple of (state event_ids, auth event_ids)
Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
"""
result = await self.transport_layer.get_room_state_ids(
result = yield self.transport_layer.get_room_state_ids(
destination, room_id, event_id=event_id
)
@@ -346,116 +331,37 @@ class FederationClient(FederationBase):
return state_event_ids, auth_event_ids
async def _check_sigs_and_hash_and_fetch(
self,
origin: str,
pdus: List[EventBase],
room_version: str,
outlier: bool = False,
include_none: bool = False,
) -> List[EventBase]:
"""Takes a list of PDUs and checks the signatures and hashs of each
one. If a PDU fails its signature check then we check if we have it in
the database and if not then request if from the originating server of
that PDU.
@defer.inlineCallbacks
@log_function
def get_event_auth(self, destination, room_id, event_id):
res = yield self.transport_layer.get_event_auth(destination, room_id, event_id)
If a PDU fails its content hash check then it is redacted.
The given list of PDUs is not modified; instead the function returns
a new list.
Args:
origin
pdus
room_version
outlier: Whether the events are outliers or not
include_none: Whether to include None in the returned list
for events that have failed their checks
Returns:
Deferred: A list of PDUs that have valid signatures and hashes.
"""
deferreds = self._check_sigs_and_hashes(room_version, pdus)
@defer.inlineCallbacks
def handle_check_result(pdu: EventBase, deferred: Deferred):
try:
res = yield make_deferred_yieldable(deferred)
except SynapseError:
res = None
if not res:
# Check local db.
res = yield self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
if not res and pdu.origin != origin:
try:
res = yield defer.ensureDeferred(
self.get_pdu(
destinations=[pdu.origin],
event_id=pdu.event_id,
room_version=room_version, # type: ignore
outlier=outlier,
timeout=10000,
)
)
except SynapseError:
pass
if not res:
logger.warning(
"Failed to find copy of %s with valid signature", pdu.event_id
)
return res
handle = preserve_fn(handle_check_result)
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
valid_pdus = await make_deferred_yieldable(
defer.gatherResults(deferreds2, consumeErrors=True)
).addErrback(unwrapFirstError)
if include_none:
return valid_pdus
else:
return [p for p in valid_pdus if p]
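
Stripped of the Deferred plumbing, handle_check_result above is a three-step recovery ladder. A hedged sketch of the same flow (the helper callables here are hypothetical, not Synapse APIs):

    async def recover_pdu(pdu, check_sig, get_local_event, fetch_from_origin):
        # 1. Accept the PDU if its signatures and hashes check out.
        try:
            return await check_sig(pdu)
        except Exception:
            pass
        # 2. Otherwise see whether we already have a trusted copy locally.
        event = await get_local_event(pdu.event_id)
        if event is not None:
            return event
        # 3. As a last resort, re-fetch the event from its originating server.
        return await fetch_from_origin(pdu)
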
async def get_event_auth(self, destination, room_id, event_id):
res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(p, room_version, outlier=True)
for p in res["auth_chain"]
event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"]
]
signed_auth = await self._check_sigs_and_hash_and_fetch(
destination, auth_chain, outlier=True, room_version=room_version.identifier
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination, auth_chain, outlier=True, room_version=room_version
)
signed_auth.sort(key=lambda e: e.depth)
return signed_auth
async def _try_destination_list(
self,
description: str,
destinations: Iterable[str],
callback: Callable[[str], Awaitable[T]],
) -> T:
@defer.inlineCallbacks
def _try_destination_list(self, description, destinations, callback):
"""Try an operation on a series of servers, until it succeeds
Args:
description: description of the operation we're doing, for logging
description (unicode): description of the operation we're doing, for logging
destinations: list of server_names to try
destinations (Iterable[unicode]): list of server_names to try
callback: Function to run for each server. Passed a single
argument: the server_name to try.
callback (callable): Function to run for each server. Passed a single
argument: the server_name to try. May return a deferred.
If the callback raises a CodeMessageException with a 300/400 code,
attempts to perform the operation stop immediately and the exception is
@@ -466,7 +372,7 @@ class FederationClient(FederationBase):
suppressed if the exception is an InvalidResponseError.
Returns:
The result of callback, if it succeeds
The [Deferred] result of callback, if it succeeds
Raises:
SynapseError if the chosen remote server returns a 300/400 code, or
@@ -477,7 +383,7 @@ class FederationClient(FederationBase):
continue
try:
res = await callback(destination)
res = yield callback(destination)
return res
except InvalidResponseError as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
@@ -496,12 +402,12 @@ class FederationClient(FederationBase):
)
except Exception:
logger.warning(
"Failed to %s via %s", description, destination, exc_info=True
"Failed to %s via %s", description, destination, exc_info=1
)
raise SynapseError(502, "Failed to %s via any server" % (description,))
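
A self-contained sketch of the _try_destination_list pattern in plain asyncio (illustrative names; the 300/400 fast-fail branch is omitted for brevity):

    import asyncio
    from typing import Awaitable, Callable, Iterable, TypeVar

    T = TypeVar("T")

    class InvalidResponse(RuntimeError):
        """Raised by a callback when a server's answer cannot be used."""

    async def try_destinations(
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
    ) -> T:
        for destination in destinations:
            try:
                return await callback(destination)
            except InvalidResponse as e:
                # A bad answer from one server: log it and move on to the next.
                print("Failed to %s via %s: %s" % (description, destination, e))
        raise RuntimeError("Failed to %s via any server" % (description,))

    async def main() -> None:
        async def fetch(server: str) -> str:
            if server != "good.example.org":
                raise InvalidResponse("unusable response")
            return "result from " + server

        print(await try_destinations(
            "fetch", ["bad.example.org", "good.example.org"], fetch
        ))

    asyncio.run(main())
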
async def make_membership_event(
def make_membership_event(
self,
destinations: Iterable[str],
room_id: str,
@@ -509,7 +415,7 @@ class FederationClient(FederationBase):
membership: str,
content: dict,
params: Dict[str, str],
) -> Tuple[str, EventBase, RoomVersion]:
):
"""
Creates an m.room.member event, with context, without participating in the room.
@@ -530,19 +436,19 @@ class FederationClient(FederationBase):
content: Any additional data to put into the content field of the
event.
params: Query parameters to include in the request.
Returns:
Return:
Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of
`(origin, event, room_version)` where origin is the remote
homeserver which generated the event, and room_version is the
version of the room.
Raises:
UnsupportedRoomVersionError: if remote responds with
a room version we don't understand.
Fails with an `UnsupportedRoomVersionError` if remote responds with
a room version we don't understand.
SynapseError: if the chosen remote server returns a 300/400 code.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
RuntimeError: if no servers were reachable.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
valid_memberships = {Membership.JOIN, Membership.LEAVE}
if membership not in valid_memberships:
@@ -551,8 +457,9 @@ class FederationClient(FederationBase):
% (membership, ",".join(valid_memberships))
)
async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
ret = await self.transport_layer.make_membership_event(
@defer.inlineCallbacks
def send_request(destination):
ret = yield self.transport_layer.make_membership_event(
destination, room_id, user_id, membership, params
)
@@ -585,83 +492,88 @@ class FederationClient(FederationBase):
event_dict=pdu_dict,
)
return destination, ev, room_version
return (destination, ev, room_version)
return await self._try_destination_list(
return self._try_destination_list(
"make_" + membership, destinations, send_request
)
async def send_join(
self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
) -> Dict[str, Any]:
def send_join(self, destinations, pdu, event_format_version):
"""Sends a join event to one of a list of homeservers.
Doing so will cause the remote server to add the event to the graph,
and send the event out to the rest of the federation.
Args:
destinations: Candidate homeservers which are probably
destinations (str): Candidate homeservers which are probably
participating in the room.
pdu: event to be sent
room_version: the version of the room (according to the server that
did the make_join)
pdu (BaseEvent): event to be sent
event_format_version (int): The event format version
Returns:
a dict with members ``origin`` (a string
giving the server the event was sent to, ``state`` (?) and
Return:
Deferred: resolves to a dict with members ``origin`` (a string
giving the server the event was sent to, ``state`` (?) and
``auth_chain``.
Raises:
SynapseError: if the chosen remote server returns a 300/400 code.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
RuntimeError: if no servers were reachable.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
async def send_request(destination) -> Dict[str, Any]:
content = await self._do_send_join(destination, pdu)
def check_authchain_validity(signed_auth_chain):
for e in signed_auth_chain:
if e.type == EventTypes.Create:
create_event = e
break
else:
raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,))
# the room version should be sane.
room_version = create_event.content.get("room_version", "1")
if room_version not in KNOWN_ROOM_VERSIONS:
# This shouldn't be possible, because the remote server should have
# rejected the join attempt during make_join.
raise InvalidResponseError(
"room appears to have unsupported version %s" % (room_version,)
)
@defer.inlineCallbacks
def send_request(destination):
content = yield self._do_send_join(destination, pdu)
logger.debug("Got content: %s", content)
state = [
event_from_pdu_json(p, room_version, outlier=True)
event_from_pdu_json(p, event_format_version, outlier=True)
for p in content.get("state", [])
]
auth_chain = [
event_from_pdu_json(p, room_version, outlier=True)
event_from_pdu_json(p, event_format_version, outlier=True)
for p in content.get("auth_chain", [])
]
pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
create_event = None
room_version = None
for e in state:
if (e.type, e.state_key) == (EventTypes.Create, ""):
create_event = e
room_version = e.content.get(
"room_version", RoomVersions.V1.identifier
)
break
if create_event is None:
if room_version is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
# the room version should be sane.
create_room_version = create_event.content.get(
"room_version", RoomVersions.V1.identifier
)
if create_room_version != room_version.identifier:
# either the server that fulfilled the make_join, or the server that is
# handling the send_join, is lying.
raise InvalidResponseError(
"Unexpected room version %s in create event"
% (create_room_version,)
)
valid_pdus = await self._check_sigs_and_hash_and_fetch(
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
destination,
list(pdus.values()),
outlier=True,
room_version=room_version.identifier,
room_version=room_version,
)
valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -685,17 +597,7 @@ class FederationClient(FederationBase):
for s in signed_state:
s.internal_metadata = copy.deepcopy(s.internal_metadata)
# double-check that the same create event has ended up in the auth chain
auth_chain_create_events = [
e.event_id
for e in signed_auth
if (e.type, e.state_key) == (EventTypes.Create, "")
]
if auth_chain_create_events != [create_event.event_id]:
raise InvalidResponseError(
"Unexpected create event(s) in auth chain: %s"
% (auth_chain_create_events,)
)
check_authchain_validity(signed_auth)
return {
"state": signed_state,
@@ -703,13 +605,14 @@ class FederationClient(FederationBase):
"origin": destination,
}
return await self._try_destination_list("send_join", destinations, send_request)
return self._try_destination_list("send_join", destinations, send_request)
async def _do_send_join(self, destination: str, pdu: EventBase):
@defer.inlineCallbacks
def _do_send_join(self, destination, pdu):
time_now = self._clock.time_msec()
try:
content = await self.transport_layer.send_join_v2(
content = yield self.transport_layer.send_join_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -731,7 +634,7 @@ class FederationClient(FederationBase):
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
resp = await self.transport_layer.send_join_v1(
resp = yield self.transport_layer.send_join_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -742,45 +645,51 @@ class FederationClient(FederationBase):
# content.
return resp[1]
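
Both _do_send_join and _do_send_leave follow the same try-v2-then-fall-back-to-v1 shape. A hedged standalone sketch (hypothetical transport object and error class, simplified from the code above):

    class HttpResponseError(Exception):
        def __init__(self, code: int, msg: str = ""):
            super().__init__(msg)
            self.code = code

    async def send_join_with_fallback(transport, destination: str, room_id: str,
                                      event_id: str, content: dict) -> dict:
        try:
            return await transport.send_join_v2(destination, room_id, event_id, content)
        except HttpResponseError as e:
            # Older servers answer 400/404 for unknown endpoints; anything
            # else is a genuine failure and should propagate.
            if e.code not in (400, 404):
                raise
        # The v1 endpoint returns a (200, content) tuple; unwrap it.
        _, content_v1 = await transport.send_join_v1(destination, room_id, event_id, content)
        return content_v1
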
async def send_invite(
self, destination: str, room_id: str, event_id: str, pdu: EventBase,
) -> EventBase:
room_version = await self.store.get_room_version(room_id)
@defer.inlineCallbacks
def send_invite(self, destination, room_id, event_id, pdu):
room_version = yield self.store.get_room_version_id(room_id)
content = await self._do_send_invite(destination, pdu, room_version)
content = yield self._do_send_invite(destination, pdu, room_version)
pdu_dict = content["event"]
logger.debug("Got response to send_invite: %s", pdu_dict)
pdu = event_from_pdu_json(pdu_dict, room_version)
room_version = yield self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(pdu_dict, format_ver)
# Check signatures are correct.
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
# FIXME: We should handle signature failures more gracefully.
return pdu
async def _do_send_invite(
self, destination: str, pdu: EventBase, room_version: RoomVersion
) -> JsonDict:
@defer.inlineCallbacks
def _do_send_invite(self, destination, pdu, room_version):
"""Actually sends the invite, first trying v2 API and falling back to
v1 API if necessary.
Args:
destination (str): Target server
pdu (FrozenEvent)
room_version (str)
Returns:
The event as a dict as returned by the remote server
dict: The event as a dict as returned by the remote server
"""
time_now = self._clock.time_msec()
try:
content = await self.transport_layer.send_invite_v2(
content = yield self.transport_layer.send_invite_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content={
"event": pdu.get_pdu_json(time_now),
"room_version": room_version.identifier,
"room_version": room_version,
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
},
)
@@ -798,7 +707,8 @@ class FederationClient(FederationBase):
# Otherwise, we assume that the remote server doesn't understand
# the v2 invite API. That's ok provided the room uses old-style event
# IDs.
if room_version.event_format != EventFormatVersions.V1:
v = KNOWN_ROOM_VERSIONS.get(room_version)
if v.event_format != EventFormatVersions.V1:
raise SynapseError(
400,
"User's homeserver does not support this room version",
@@ -812,7 +722,7 @@ class FederationClient(FederationBase):
# Didn't work, try v1 API.
# Note the v1 API returns a tuple of `(200, content)`
_, content = await self.transport_layer.send_invite_v1(
_, content = yield self.transport_layer.send_invite_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -820,7 +730,7 @@ class FederationClient(FederationBase):
)
return content
async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
def send_leave(self, destinations, pdu):
"""Sends a leave event to one of a list of homeservers.
Doing so will cause the remote server to add the event to the graph,
@@ -829,29 +739,34 @@ class FederationClient(FederationBase):
This is mostly useful to reject received invites.
Args:
destinations: Candidate homeservers which are probably
destinations (str): Candidate homeservers which are probably
participating in the room.
pdu: event to be sent
pdu (BaseEvent): event to be sent
Raises:
SynapseError if the chosen remote server returns a 300/400 code.
Return:
Deferred: resolves to None.
RuntimeError if no servers were reachable.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
async def send_request(destination: str) -> None:
content = await self._do_send_leave(destination, pdu)
@defer.inlineCallbacks
def send_request(destination):
content = yield self._do_send_leave(destination, pdu)
logger.debug("Got content: %s", content)
return None
return await self._try_destination_list(
"send_leave", destinations, send_request
)
return self._try_destination_list("send_leave", destinations, send_request)
async def _do_send_leave(self, destination, pdu):
@defer.inlineCallbacks
def _do_send_leave(self, destination, pdu):
time_now = self._clock.time_msec()
try:
content = await self.transport_layer.send_leave_v2(
content = yield self.transport_layer.send_leave_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -873,7 +788,7 @@ class FederationClient(FederationBase):
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
resp = await self.transport_layer.send_leave_v1(
resp = yield self.transport_layer.send_leave_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -905,33 +820,34 @@ class FederationClient(FederationBase):
third_party_instance_id=third_party_instance_id,
)
async def get_missing_events(
@defer.inlineCallbacks
def get_missing_events(
self,
destination: str,
room_id: str,
earliest_events_ids: Sequence[str],
latest_events: Iterable[EventBase],
limit: int,
min_depth: int,
timeout: int,
) -> List[EventBase]:
destination,
room_id,
earliest_events_ids,
latest_events,
limit,
min_depth,
timeout,
):
"""Tries to fetch events we are missing. This is called when we receive
an event without having received all of its ancestors.
Args:
destination
room_id
earliest_events_ids: List of event ids. Effectively the
destination (str)
room_id (str)
earliest_events_ids (list): List of event ids. Effectively the
events we expected to receive, but haven't. `get_missing_events`
should only return events that didn't happen before these.
latest_events: List of events we have received that we don't
latest_events (list): List of events we have received that we don't
have all previous events for.
limit: Maximum number of events to return.
min_depth: Minimum depth of events to return.
timeout: Max time to wait in ms
limit (int): Maximum number of events to return.
min_depth (int): Minimum depth of events to return.
timeout (int): Max time to wait in ms
"""
try:
content = await self.transport_layer.get_missing_events(
content = yield self.transport_layer.get_missing_events(
destination=destination,
room_id=room_id,
earliest_events=earliest_events_ids,
@@ -941,14 +857,15 @@ class FederationClient(FederationBase):
timeout=timeout,
)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
events = [
event_from_pdu_json(e, room_version) for e in content.get("events", [])
event_from_pdu_json(e, format_ver) for e in content.get("events", [])
]
signed_events = await self._check_sigs_and_hash_and_fetch(
destination, events, outlier=False, room_version=room_version.identifier
signed_events = yield self._check_sigs_and_hash_and_fetch(
destination, events, outlier=False, room_version=room_version
)
except HttpResponseException as e:
if not e.code == 400:


@@ -38,6 +38,7 @@ from synapse.api.errors import (
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
@@ -53,7 +54,7 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.types import get_domain_from_id
from synapse.util import glob_to_regex, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
@@ -235,17 +236,24 @@ class FederationServer(FederationBase):
continue
try:
room_version = await self.store.get_room_version(room_id)
room_version = await self.store.get_room_version_id(room_id)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
except UnsupportedRoomVersionError as e:
try:
format_ver = room_version_to_event_format(room_version)
except UnsupportedRoomVersionError:
# this can happen if support for a given room version is withdrawn,
# in which case we may still receive events for that room.
logger.info("Ignoring PDU: %s", e)
logger.info(
"Ignoring PDU for room %s with unknown version %s",
room_id,
room_version,
)
continue
event = event_from_pdu_json(p, room_version)
event = event_from_pdu_json(p, format_ver)
pdus_by_room.setdefault(room_id, []).append(event)
pdu_results = {}
@@ -296,12 +304,7 @@ class FederationServer(FederationBase):
async def _process_edu(edu_dict):
received_edus_counter.inc()
edu = Edu(
origin=origin,
destination=self.server_name,
edu_type=edu_dict["edu_type"],
content=edu_dict["content"],
)
edu = Edu(**edu_dict)
await self.registry.on_edu(edu.edu_type, origin, edu.content)
await concurrently_execute(
@@ -395,21 +398,20 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
async def on_invite_request(
self, origin: str, content: JsonDict, room_version_id: str
):
room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
if not room_version:
async def on_invite_request(self, origin, content, room_version):
if room_version not in KNOWN_ROOM_VERSIONS:
raise SynapseError(
400,
"Homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
pdu = event_from_pdu_json(content, room_version)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
pdu = await self._check_sigs_and_hash(room_version, pdu)
ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
@@ -417,15 +419,16 @@ class FederationServer(FederationBase):
async def on_send_join_request(self, origin, content, room_id):
logger.debug("on_send_join_request: content: %s", content)
room_version = await self.store.get_room_version(room_id)
pdu = event_from_pdu_json(content, room_version)
room_version = await self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
pdu = await self._check_sigs_and_hash(room_version, pdu)
res_pdus = await self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
@@ -447,15 +450,16 @@ class FederationServer(FederationBase):
async def on_send_leave_request(self, origin, content, room_id):
logger.debug("on_send_leave_request: content: %s", content)
room_version = await self.store.get_room_version(room_id)
pdu = event_from_pdu_json(content, room_version)
room_version = await self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
pdu = await self._check_sigs_and_hash(room_version, pdu)
await self.handler.on_send_leave_request(origin, pdu)
return {}
@@ -470,6 +474,58 @@ class FederationServer(FederationBase):
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return 200, res
async def on_query_auth_request(self, origin, content, room_id, event_id):
"""
Content is a dict with keys::
auth_chain (list): A list of events that give the auth chain.
missing (list): A list of event_ids indicating what the other
side (`origin`) thinks we're missing.
rejects (dict): A mapping from event_id to a 2-tuple of reason
string and a proof (or None) of why the event was rejected.
The keys of this dict give the list of events the `origin` has
rejected.
Args:
origin (str)
content (dict)
event_id (str)
Returns:
Deferred: Results in `dict` with the same format as `content`
"""
with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version_id(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
]
signed_auth = await self._check_sigs_and_hash_and_fetch(
origin, auth_chain, outlier=True, room_version=room_version
)
ret = await self.handler.on_query_auth(
origin,
event_id,
room_id,
signed_auth,
content.get("rejects", []),
content.get("missing", []),
)
time_now = self._clock.time_msec()
send_content = {
"auth_chain": [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
"rejects": ret.get("rejects", []),
"missing": ret.get("missing", []),
}
return 200, send_content
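
For concreteness, a hedged example of the request body described in the docstring above (dummy IDs; the "2-tuple" becomes a two-element list once JSON-encoded):

    content = {
        # events that (the origin believes) give the auth chain
        "auth_chain": [
            {"type": "m.room.create", "state_key": "", "content": {}},
        ],
        # event_ids the origin thinks we're missing
        "missing": ["$missing_event_id"],
        # event_id -> [reason, proof-or-None] for events the origin rejected
        "rejects": {
            "$rejected_event_id": ["auth_error", None],
        },
    }
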
@log_function
def on_query_client_keys(self, origin, content):
return self.on_query_request("client_keys", content)
@@ -517,7 +573,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
logger.debug(
logger.info(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
" limit: %d",
earliest_events,
@@ -530,11 +586,11 @@ class FederationServer(FederationBase):
)
if len(missing_events) < 5:
logger.debug(
logger.info(
"Returning %d events: %r", len(missing_events), missing_events
)
else:
logger.debug("Returning %d events", len(missing_events))
logger.info("Returning %d events", len(missing_events))
time_now = self._clock.time_msec()


@@ -129,9 +129,9 @@ class FederationRemoteSendQueue(object):
for key in keys[:i]:
del self.presence_changed[key]
user_ids = {
user_ids = set(
user_id for uids in self.presence_changed.values() for user_id in uids
}
)
keys = self.presence_destinations.keys()
i = self.presence_destinations.bisect_left(position_to_delete)


@@ -14,7 +14,6 @@
# limitations under the License.
import logging
from typing import Dict, Hashable, Iterable, List, Optional, Set
from six import itervalues
@@ -24,7 +23,6 @@ from twisted.internet import defer
import synapse
import synapse.metrics
from synapse.events import EventBase
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
@@ -41,8 +39,6 @@ from synapse.metrics import (
events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import ReadReceipt
from synapse.util.metrics import Measure, measure_func
logger = logging.getLogger(__name__)
@@ -72,7 +68,7 @@ class FederationSender(object):
self._transaction_manager = TransactionManager(hs)
# map from destination to PerDestinationQueue
self._per_destination_queues = {} # type: Dict[str, PerDestinationQueue]
self._per_destination_queues = {} # type: dict[str, PerDestinationQueue]
LaterGauge(
"synapse_federation_transaction_queue_pending_destinations",
@@ -88,7 +84,7 @@ class FederationSender(object):
# Map of user_id -> UserPresenceState for all the pending presence
# to be sent out by user_id. Entries here get processed and put in
# pending_presence_by_dest
self.pending_presence = {} # type: Dict[str, UserPresenceState]
self.pending_presence = {}
LaterGauge(
"synapse_federation_transaction_queue_pending_pdus",
@@ -120,17 +116,20 @@ class FederationSender(object):
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room = (
{}
) # type: Dict[str, Set[PerDestinationQueue]]
) # type: dict[str, set[PerDestinationQueue]]
self._rr_txn_interval_per_room_ms = (
1000.0 / hs.config.federation_rr_transactions_per_room_per_second
1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
)
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
def _get_per_destination_queue(self, destination):
"""Get or create a PerDestinationQueue for the given destination
Args:
destination: server_name of remote server
destination (str): server_name of remote server
Returns:
PerDestinationQueue
"""
queue = self._per_destination_queues.get(destination)
if not queue:
@@ -138,7 +137,7 @@ class FederationSender(object):
self._per_destination_queues[destination] = queue
return queue
def notify_new_events(self, current_id: int) -> None:
def notify_new_events(self, current_id):
"""This gets called when we have some new events we might want to
send out to other servers.
"""
@@ -152,12 +151,28 @@ class FederationSender(object):
"process_event_queue_for_federation", self._process_event_queue_loop
)
async def _process_event_queue_loop(self) -> None:
@defer.inlineCallbacks
def _process_event_queue_loop(self):
loop_start_time = self.clock.time_msec()
try:
self._is_processing = True
while True:
last_token = await self.store.get_federation_out_pos("events")
next_token, events = await self.store.get_all_new_events_stream(
# if we've been going around this loop for a long time without
# catching up, deprioritise transaction transmission. This should mean
# that events get batched into fewer transactions, which is more
# efficient, and hence give us a chance to catch up
if (
self.clock.time_msec() - loop_start_time > 60 * 1000
and not self._transaction_manager.deprioritise_transmission
):
logger.warning(
"Event queue is getting behind: deprioritising transaction "
"transmission"
)
self._transaction_manager.deprioritise_transmission = True
last_token = yield self.store.get_federation_out_pos("events")
next_token, events = yield self.store.get_all_new_events_stream(
last_token, self._last_poked_id, limit=100
)
@@ -166,7 +181,8 @@ class FederationSender(object):
if not events and next_token >= self._last_poked_id:
break
async def handle_event(event: EventBase) -> None:
@defer.inlineCallbacks
def handle_event(event):
# Only send events for this server.
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
is_mine = self.is_mine_id(event.sender)
@@ -183,7 +199,7 @@ class FederationSender(object):
# Otherwise if the last member on a server in a room is
# banned then it won't receive the event because it won't
# be in the room after the ban.
destinations = await self.state.get_hosts_in_room_at_events(
destinations = yield self.state.get_hosts_in_room_at_events(
event.room_id, event_ids=event.prev_event_ids()
)
except Exception:
@@ -205,16 +221,17 @@ class FederationSender(object):
self._send_pdu(event, destinations)
async def handle_room_events(events: Iterable[EventBase]) -> None:
@defer.inlineCallbacks
def handle_room_events(events):
with Measure(self.clock, "handle_room_events"):
for event in events:
await handle_event(event)
yield handle_event(event)
events_by_room = {} # type: Dict[str, List[EventBase]]
events_by_room = {}
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
await make_deferred_yieldable(
yield make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(handle_room_events, evs)
@@ -224,11 +241,11 @@ class FederationSender(object):
)
)
await self.store.update_federation_out_pos("events", next_token)
yield self.store.update_federation_out_pos("events", next_token)
if events:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)
ts = yield self.store.get_received_ts(events[-1].event_id)
synapse.metrics.event_processing_lag.labels(
"federation_sender"
@@ -251,8 +268,11 @@ class FederationSender(object):
finally:
self._is_processing = False
if self._transaction_manager.deprioritise_transmission:
logger.info("Event queue caught up: re-prioritising transmission")
self._transaction_manager.deprioritise_transmission = False
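
The deprioritise_transmission flag added here is a simple cross-loop backpressure signal. A minimal standalone sketch of the idea (illustrative names; the matching randomised sleep appears in PerDestinationQueue further below):

    import asyncio
    import random

    DEPRIORITISE_SLEEP_TIME = 10  # seconds, mirroring the constant used below

    class SharedState:
        # set by the event-processing loop when it falls behind,
        # cleared once it has caught up
        deprioritise_transmission = False

    async def sender_loop(state, send_one):
        while True:
            if state.deprioritise_transmission:
                # Back off so events batch into fewer, larger transactions;
                # randomise so the transmitters don't all wake in lockstep.
                await asyncio.sleep(
                    random.uniform(DEPRIORITISE_SLEEP_TIME, DEPRIORITISE_SLEEP_TIME * 2)
                )
            if not await send_one():
                break  # queue drained
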
def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
def _send_pdu(self, pdu, destinations):
# We loop through all destinations to see whether we already have
# a transaction in progress. If we do, stick it in the pending_pdus
# table and we'll get back to it later.
@@ -274,11 +294,11 @@ class FederationSender(object):
self._get_per_destination_queue(destination).send_pdu(pdu, order)
@defer.inlineCallbacks
def send_read_receipt(self, receipt: ReadReceipt):
def send_read_receipt(self, receipt):
"""Send a RR to any other servers in the room
Args:
receipt: receipt to be sent
receipt (synapse.types.ReadReceipt): receipt to be sent
"""
# Some background on the rate-limiting going on here.
@@ -341,7 +361,7 @@ class FederationSender(object):
else:
queue.flush_read_receipts_for_room(room_id)
def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
def _schedule_rr_flush_for_room(self, room_id, n_domains):
# that is going to cause approximately len(domains) transactions, so now back
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
@@ -350,7 +370,7 @@ class FederationSender(object):
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
self._queues_awaiting_rr_flush_by_room[room_id] = set()
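
As a worked example of the backoff computed above, assuming the default federation_rr_transactions_per_room_per_second of 50 (an assumption; the value is configurable):

    rr_txn_interval_per_room_ms = 1000.0 / 50  # 20 ms per destination domain
    n_domains = 100
    backoff_ms = rr_txn_interval_per_room_ms * n_domains
    print(backoff_ms)  # 2000.0: wait ~2 seconds before flushing this room's receipts
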
def _flush_rrs_for_room(self, room_id: str) -> None:
def _flush_rrs_for_room(self, room_id):
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)
@@ -366,11 +386,14 @@ class FederationSender(object):
@preserve_fn # the caller should not yield on this
@defer.inlineCallbacks
def send_presence(self, states: List[UserPresenceState]):
def send_presence(self, states):
"""Send the new presence states to the appropriate destinations.
This actually queues up the presence states ready for sending and
triggers a background task to process them and send out the transactions.
Args:
states (list(UserPresenceState))
"""
if not self.hs.config.use_presence:
# No-op if presence is disabled.
@@ -407,10 +430,11 @@ class FederationSender(object):
finally:
self._processing_pending_presence = False
def send_presence_to_destinations(
self, states: List[UserPresenceState], destinations: List[str]
) -> None:
def send_presence_to_destinations(self, states, destinations):
"""Send the given presence states to the given destinations.
Args:
states (list[UserPresenceState])
destinations (list[str])
"""
@@ -425,9 +449,12 @@ class FederationSender(object):
@measure_func("txnqueue._process_presence")
@defer.inlineCallbacks
def _process_presence_inner(self, states: List[UserPresenceState]):
def _process_presence_inner(self, states):
"""Given a list of states populate self.pending_presence_by_dest and
poke to send a new transaction to each destination
Args:
states (list(UserPresenceState))
"""
hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
@@ -437,20 +464,14 @@ class FederationSender(object):
continue
self._get_per_destination_queue(destination).send_presence(states)
def build_and_send_edu(
self,
destination: str,
edu_type: str,
content: dict,
key: Optional[Hashable] = None,
):
def build_and_send_edu(self, destination, edu_type, content, key=None):
"""Construct an Edu object, and queue it for sending
Args:
destination: name of server to send to
edu_type: type of EDU to send
content: content of EDU
key: clobbering key for this edu
destination (str): name of server to send to
edu_type (str): type of EDU to send
content (dict): content of EDU
key (Any|None): clobbering key for this edu
"""
if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
@@ -465,12 +486,12 @@ class FederationSender(object):
self.send_edu(edu, key)
def send_edu(self, edu: Edu, key: Optional[Hashable]):
def send_edu(self, edu, key):
"""Queue an EDU for sending
Args:
edu: edu to send
key: clobbering key for this edu
edu (Edu): edu to send
key (Any|None): clobbering key for this edu
"""
queue = self._get_per_destination_queue(edu.destination)
if key:
@@ -478,7 +499,7 @@ class FederationSender(object):
else:
queue.send_edu(edu)
def send_device_messages(self, destination: str):
def send_device_messages(self, destination):
if destination == self.server_name:
logger.warning("Not sending device update to ourselves")
return
@@ -498,5 +519,5 @@ class FederationSender(object):
self._get_per_destination_queue(destination).attempt_new_transaction()
def get_current_token(self) -> int:
def get_current_token(self):
return 0


@@ -15,11 +15,12 @@
# limitations under the License.
import datetime
import logging
from typing import Dict, Hashable, Iterable, List, Tuple
import random
from prometheus_client import Counter
import synapse.server
from twisted.internet import defer
from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
@@ -31,12 +32,14 @@ from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import ReadReceipt
from synapse.types import StateMap
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
# This is defined in the Matrix spec and enforced by the receiver.
MAX_EDUS_PER_TRANSACTION = 100
DEPRIORITISE_SLEEP_TIME = 10
logger = logging.getLogger(__name__)
@@ -56,18 +59,13 @@ class PerDestinationQueue(object):
Manages the per-destination transmission queues.
Args:
hs
transaction_sender
destination: the server_name of the destination that we are managing
hs (synapse.HomeServer):
transaction_sender (TransactionManager):
destination (str): the server_name of the destination that we are managing
transmission for.
"""
def __init__(
self,
hs: "synapse.server.HomeServer",
transaction_manager: "synapse.federation.sender.TransactionManager",
destination: str,
):
def __init__(self, hs, transaction_manager, destination):
self._server_name = hs.hostname
self._clock = hs.get_clock()
self._store = hs.get_datastore()
@@ -77,20 +75,20 @@ class PerDestinationQueue(object):
self.transmission_loop_running = False
# a list of tuples of (pending pdu, order)
self._pending_pdus = [] # type: List[Tuple[EventBase, int]]
self._pending_edus = [] # type: List[Edu]
self._pending_pdus = [] # type: list[tuple[EventBase, int]]
self._pending_edus = [] # type: list[Edu]
# Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
# based on their key (e.g. typing events by room_id)
# Map of (edu_type, key) -> Edu
self._pending_edus_keyed = {} # type: Dict[Tuple[str, Hashable], Edu]
self._pending_edus_keyed = {} # type: StateMap[Edu]
# Map of user_id -> UserPresenceState of pending presence to be sent to this
# destination
self._pending_presence = {} # type: Dict[str, UserPresenceState]
self._pending_presence = {} # type: dict[str, UserPresenceState]
# room_id -> receipt_type -> user_id -> receipt_dict
self._pending_rrs = {} # type: Dict[str, Dict[str, Dict[str, dict]]]
self._pending_rrs = {}
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
@@ -100,50 +98,50 @@ class PerDestinationQueue(object):
# stream_id of last successfully sent device list update.
self._last_device_list_stream_id = 0
def __str__(self) -> str:
def __str__(self):
return "PerDestinationQueue[%s]" % self._destination
def pending_pdu_count(self) -> int:
def pending_pdu_count(self):
return len(self._pending_pdus)
def pending_edu_count(self) -> int:
def pending_edu_count(self):
return (
len(self._pending_edus)
+ len(self._pending_presence)
+ len(self._pending_edus_keyed)
)
def send_pdu(self, pdu: EventBase, order: int) -> None:
def send_pdu(self, pdu, order):
"""Add a PDU to the queue, and start the transmission loop if neccessary
Args:
pdu: pdu to send
order
pdu (EventBase): pdu to send
order (int):
"""
self._pending_pdus.append((pdu, order))
self.attempt_new_transaction()
def send_presence(self, states: Iterable[UserPresenceState]) -> None:
def send_presence(self, states):
"""Add presence updates to the queue. Start the transmission loop if neccessary.
Args:
states: presence to send
states (iterable[UserPresenceState]): presence to send
"""
self._pending_presence.update({state.user_id: state for state in states})
self.attempt_new_transaction()
def queue_read_receipt(self, receipt: ReadReceipt) -> None:
def queue_read_receipt(self, receipt):
"""Add a RR to the list to be sent. Doesn't start the transmission loop yet
(see flush_read_receipts_for_room)
Args:
receipt: receipt to be queued
receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
"""
self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
receipt.receipt_type, {}
)[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
def flush_read_receipts_for_room(self, room_id: str) -> None:
def flush_read_receipts_for_room(self, room_id):
# if we don't have any read-receipts for this room, it may be that we've already
# sent them out, so we don't need to flush.
if room_id not in self._pending_rrs:
@@ -151,15 +149,15 @@ class PerDestinationQueue(object):
self._rrs_pending_flush = True
self.attempt_new_transaction()
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
def send_keyed_edu(self, edu, key):
self._pending_edus_keyed[(edu.edu_type, key)] = edu
self.attempt_new_transaction()
def send_edu(self, edu) -> None:
def send_edu(self, edu):
self._pending_edus.append(edu)
self.attempt_new_transaction()
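
A self-contained illustration of the keyed-EDU clobbering described above (minimal stand-in Edu type, not Synapse's): queuing a second EDU under the same (edu_type, key) replaces the first, so e.g. only the latest typing notification per room survives.

    from typing import Dict, Hashable, NamedTuple, Tuple

    class Edu(NamedTuple):
        edu_type: str
        content: dict

    pending_edus_keyed: Dict[Tuple[str, Hashable], Edu] = {}

    def send_keyed_edu(edu: Edu, key: Hashable) -> None:
        # clobber any older pending EDU with the same (type, key)
        pending_edus_keyed[(edu.edu_type, key)] = edu

    send_keyed_edu(Edu("m.typing", {"user_ids": ["@a:hs"]}), key="!room:hs")
    send_keyed_edu(Edu("m.typing", {"user_ids": []}), key="!room:hs")
    assert len(pending_edus_keyed) == 1  # the second update replaced the first
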
def attempt_new_transaction(self) -> None:
def attempt_new_transaction(self):
"""Try to start a new transaction to this destination
If there is already a transaction in progress to this destination,
@@ -182,22 +180,35 @@ class PerDestinationQueue(object):
self._transaction_transmission_loop,
)
async def _transaction_transmission_loop(self) -> None:
pending_pdus = [] # type: List[Tuple[EventBase, int]]
@defer.inlineCallbacks
def _transaction_transmission_loop(self):
pending_pdus = []
try:
self.transmission_loop_running = True
# This will throw if we wouldn't retry. We do this here so we fail
# quickly, but we will later check this again in the http client,
# which is why we throw the result away.
await get_retry_limiter(self._destination, self._clock, self._store)
yield get_retry_limiter(self._destination, self._clock, self._store)
pending_pdus = []
while True:
if self._transaction_manager.deprioritise_transmission:
# if the event-processing loop has got behind, sleep to give it
# a chance to catch up. Add some randomness so that the transmitters
# don't all wake up in sync.
sleeptime = random.uniform(
DEPRIORITISE_SLEEP_TIME, DEPRIORITISE_SLEEP_TIME * 2
)
logger.info(
"TX [%s]: sleeping for %f seconds", self._destination, sleeptime
)
yield self._clock.sleep(sleeptime)
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2
device_update_edus, dev_list_id = await self._get_device_update_edus(
device_update_edus, dev_list_id = yield self._get_device_update_edus(
limit
)
@@ -206,7 +217,7 @@ class PerDestinationQueue(object):
(
to_device_edus,
device_stream_id,
) = await self._get_to_device_message_edus(limit)
) = yield self._get_to_device_message_edus(limit)
pending_edus = device_update_edus + to_device_edus
@@ -273,7 +284,7 @@ class PerDestinationQueue(object):
# END CRITICAL SECTION
success = await self._transaction_manager.send_new_transaction(
success = yield self._transaction_manager.send_new_transaction(
self._destination, pending_pdus, pending_edus
)
if success:
@@ -284,7 +295,7 @@ class PerDestinationQueue(object):
# Remove the acknowledged device messages from the database
# Only bother if we actually sent some device messages
if to_device_edus:
await self._store.delete_device_msgs_for_remote(
yield self._store.delete_device_msgs_for_remote(
self._destination, device_stream_id
)
@@ -293,7 +304,7 @@ class PerDestinationQueue(object):
logger.info(
"Marking as sent %r %r", self._destination, dev_list_id
)
await self._store.mark_as_sent_devices_by_remote(
yield self._store.mark_as_sent_devices_by_remote(
self._destination, dev_list_id
)
@@ -338,7 +349,7 @@ class PerDestinationQueue(object):
# We want to be *very* sure we clear this after we stop processing
self.transmission_loop_running = False
def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
def _get_rr_edus(self, force_flush):
if not self._pending_rrs:
return
if not force_flush and not self._rrs_pending_flush:
@@ -355,16 +366,17 @@ class PerDestinationQueue(object):
self._rrs_pending_flush = False
yield edu
def _pop_pending_edus(self, limit: int) -> List[Edu]:
def _pop_pending_edus(self, limit):
pending_edus = self._pending_edus
pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
return pending_edus
async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
@defer.inlineCallbacks
def _get_device_update_edus(self, limit):
last_device_list = self._last_device_list_stream_id
# Retrieve list of new device updates to send to the destination
now_stream_id, results = await self._store.get_device_updates_by_remote(
now_stream_id, results = yield self._store.get_device_updates_by_remote(
self._destination, last_device_list, limit=limit
)
edus = [
@@ -381,10 +393,11 @@ class PerDestinationQueue(object):
return (edus, now_stream_id)
async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
@defer.inlineCallbacks
def _get_to_device_message_edus(self, limit):
last_device_stream_id = self._last_device_stream_id
to_device_stream_id = self._store.get_to_device_stream_token()
contents, stream_id = await self._store.get_new_device_msgs_for_remote(
contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
self._destination, last_device_stream_id, to_device_stream_id, limit
)
edus = [


@@ -13,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from canonicaljson import json
import synapse.server
from twisted.internet import defer
from synapse.api.errors import HttpResponseException
from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.federation.units import Transaction
from synapse.logging.opentracing import (
extract_text_map,
set_tag,
@@ -40,7 +39,7 @@ class TransactionManager(object):
shared between PerDestinationQueue objects
"""
def __init__(self, hs: "synapse.server.HomeServer"):
def __init__(self, hs):
self._server_name = hs.hostname
self.clock = hs.get_clock() # nb must be called this for @measure_func
self._store = hs.get_datastore()
@@ -50,10 +49,13 @@ class TransactionManager(object):
# HACK to get unique tx id
self._next_txn_id = int(self.clock.time_msec())
# the federation sender sometimes sets this to delay transaction transmission,
# if the sender gets behind.
self.deprioritise_transmission = False
@measure_func("_send_new_transaction")
async def send_new_transaction(
self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
):
@defer.inlineCallbacks
def send_new_transaction(self, destination, pending_pdus, pending_edus):
# Make a transaction-sending opentracing span. This span follows on from
# all the edus in that transaction. This needs to be done since there is
@@ -129,7 +131,7 @@ class TransactionManager(object):
return data
try:
response = await self._transport_layer.send_transaction(
response = yield self._transport_layer.send_transaction(
transaction, json_data_cb
)
code = 200


@@ -152,7 +152,7 @@ class TransportLayerClient(object):
# generated by the json_data_callback.
json_data = transaction.get_dict()
path = _create_v1_path("/send/%s", transaction.transaction_id)
path = _create_v1_path("/send/%s/", transaction.transaction_id)
response = yield self.client.put_json(
transaction.destination,
@@ -161,7 +161,7 @@ class TransportLayerClient(object):
json_data_callback=json_data_callback,
long_retries=True,
backoff_on_404=True, # If we get a 404 the other side has gone
try_trailing_slash_on_400=True,
# try_trailing_slash_on_400=True,
)
return response


@@ -158,7 +158,7 @@ class Authenticator(object):
origin, json_request, now, "Incoming request"
)
logger.debug("Request from %s", origin)
logger.info("Request from %s", origin)
request.authenticated_entity = origin
# If we get a valid signed request from the other side, it's probably
@@ -579,7 +579,7 @@ class FederationV1InviteServlet(BaseFederationServlet):
# state resolution algorithm, and we don't use that for processing
# invites
content = await self.handler.on_invite_request(
origin, content, room_version_id=RoomVersions.V1.identifier
origin, content, room_version=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
@@ -606,7 +606,7 @@ class FederationV2InviteServlet(BaseFederationServlet):
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
content = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
origin, event, room_version=room_version
)
return 200, content
@@ -643,6 +643,17 @@ class FederationClientKeysClaimServlet(BaseFederationServlet):
return 200, response
class FederationQueryAuthServlet(BaseFederationServlet):
PATH = "/query_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
async def on_POST(self, origin, content, query, context, event_id):
new_content = await self.handler.on_query_auth_request(
origin, content, context, event_id
)
return 200, new_content
class FederationGetMissingEventsServlet(BaseFederationServlet):
# TODO(paul): Why does this path alone end with "/?" optional?
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
@@ -1401,6 +1412,7 @@ FEDERATION_SERVLET_CLASSES = (
FederationV2SendLeaveServlet,
FederationV1InviteServlet,
FederationV2InviteServlet,
FederationQueryAuthServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,
FederationClientKeysQueryServlet,


@@ -19,15 +19,11 @@ server protocol.
import logging
import attr
from synapse.types import JsonDict
from synapse.util.jsonobject import JsonEncodedObject
logger = logging.getLogger(__name__)
@attr.s(slots=True)
class Edu(JsonEncodedObject):
""" An Edu represents a piece of data sent from one homeserver to another.
@@ -36,24 +32,11 @@ class Edu(JsonEncodedObject):
internal ID or previous references graph.
"""
edu_type = attr.ib(type=str)
content = attr.ib(type=dict)
origin = attr.ib(type=str)
destination = attr.ib(type=str)
valid_keys = ["origin", "destination", "edu_type", "content"]
def get_dict(self) -> JsonDict:
return {
"edu_type": self.edu_type,
"content": self.content,
}
required_keys = ["edu_type"]
def get_internal_dict(self) -> JsonDict:
return {
"edu_type": self.edu_type,
"content": self.content,
"origin": self.origin,
"destination": self.destination,
}
internal_keys = ["origin", "destination"]
def get_context(self):
return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
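
Restating the attrs-based Edu from this hunk as a runnable standalone snippet (only the attrs package is assumed): get_dict() deliberately omits origin/destination, since those travel on the enclosing transaction, while get_internal_dict() keeps them for local use.

    import attr

    @attr.s(slots=True)
    class Edu:
        edu_type = attr.ib(type=str)
        content = attr.ib(type=dict)
        origin = attr.ib(type=str)
        destination = attr.ib(type=str)

        def get_dict(self) -> dict:
            # origin/destination are carried by the enclosing transaction,
            # so they are not repeated on the wire for each EDU
            return {"edu_type": self.edu_type, "content": self.content}

        def get_internal_dict(self) -> dict:
            return {
                "edu_type": self.edu_type,
                "content": self.content,
                "origin": self.origin,
                "destination": self.destination,
            }

    edu = Edu(edu_type="m.presence", content={}, origin="hs1.example", destination="hs2.example")
    assert edu.get_dict() == {"edu_type": "m.presence", "content": {}}
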


@@ -608,7 +608,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
user_results = yield self.store.get_users_in_group(
group_id, include_private=True
)
if user_id in (user_result["user_id"] for user_result in user_results):
if user_id in [user_result["user_id"] for user_result in user_results]:
raise SynapseError(400, "User already in group")
content = {


@@ -44,11 +44,7 @@ class AccountValidityHandler(object):
self._account_validity = self.hs.config.account_validity
if (
self._account_validity.enabled
and self._account_validity.renew_by_email_enabled
and load_jinja2_templates
):
if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
# Don't do email-specific configuration if renewal by email is disabled.
try:
app_name = self.hs.config.email_app_name


@@ -25,15 +25,6 @@ from synapse.app import check_bind_error
logger = logging.getLogger(__name__)
ACME_REGISTER_FAIL_ERROR = """
--------------------------------------------------------------------------------
Failed to register with the ACME provider. This is likely happening because the installation
is new, and ACME v1 has been deprecated by Let's Encrypt and disabled for
new installations since November 2019.
At the moment, Synapse doesn't support ACME v2. For more information and alternative
solutions, please read https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
--------------------------------------------------------------------------------"""
class AcmeHandler(object):
def __init__(self, hs):
@@ -80,12 +71,7 @@ class AcmeHandler(object):
# want it to control where we save the certificates, we have to reach in
# and trigger the registration machinery ourselves.
self._issuer._registered = False
try:
yield self._issuer._ensure_registered()
except Exception:
logger.error(ACME_REGISTER_FAIL_ERROR)
raise
yield self._issuer._ensure_registered()
@defer.inlineCallbacks
def provision_certificate(self):


@@ -58,10 +58,8 @@ class AdminHandler(BaseHandler):
ret = await self.store.get_user_by_id(user.to_string())
if ret:
profile = await self.store.get_profileinfo(user.localpart)
threepids = await self.store.user_get_threepids(user.to_string())
ret["displayname"] = profile.display_name
ret["avatar_url"] = profile.avatar_url
ret["threepids"] = threepids
return ret
async def export_user_data(self, user_id, writer):


@@ -17,11 +17,9 @@
import logging
import time
import unicodedata
import urllib.parse
from typing import Any, Dict, Iterable, List, Optional
import attr
import bcrypt # type: ignore[import]
import bcrypt
import pymacaroons
from twisted.internet import defer
@@ -40,12 +38,9 @@ from synapse.api.errors import (
from synapse.api.ratelimiting import Ratelimiter
from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS
from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker
from synapse.http.server import finish_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.module_api import ModuleApi
from synapse.push.mailer import load_jinja2_templates
from synapse.types import Requester, UserID
from synapse.types import UserID
from synapse.util.caches.expiringcache import ExpiringCache
from ._base import BaseHandler
@@ -63,11 +58,11 @@ class AuthHandler(BaseHandler):
"""
super(AuthHandler, self).__init__(hs)
self.checkers = {} # type: Dict[str, UserInteractiveAuthChecker]
self.checkers = {} # type: dict[str, UserInteractiveAuthChecker]
for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
inst = auth_checker_class(hs)
if inst.is_enabled():
self.checkers[inst.AUTH_TYPE] = inst # type: ignore
self.checkers[inst.AUTH_TYPE] = inst
self.bcrypt_rounds = hs.config.bcrypt_rounds
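For reference, the two mypy spellings being swapped in the hunk above are equivalent to a type checker, though the lowercase `dict[...]` generic is the PEP 585 form, which older toolchains may reject. An illustrative snippet (Checker stands in for UserInteractiveAuthChecker):

from typing import Dict

class Checker:  # stand-in for UserInteractiveAuthChecker
    pass

# PEP 484 comment style, as in the hunk above:
checkers = {}  # type: Dict[str, Checker]

# PEP 526 annotation style, equivalent under mypy (Python 3.6+):
annotated_checkers: Dict[str, Checker] = {}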
@@ -113,20 +108,8 @@ class AuthHandler(BaseHandler):
self._clock = self.hs.get_clock()
# Load the SSO redirect confirmation page HTML template
self._sso_redirect_confirm_template = load_jinja2_templates(
hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"],
)[0]
self._server_name = hs.config.server_name
# cast to tuple for use with str.startswith
self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
@defer.inlineCallbacks
def validate_user_via_ui_auth(
self, requester: Requester, request_body: Dict[str, Any], clientip: str
):
def validate_user_via_ui_auth(self, requester, request_body, clientip):
"""
Checks that the user is who they claim to be, via a UI auth.
@@ -135,11 +118,11 @@ class AuthHandler(BaseHandler):
that it isn't stolen by re-authenticating them.
Args:
requester: The user, as given by the access token
requester (Requester): The user, as given by the access token
request_body: The body of the request sent by the client
request_body (dict): The body of the request sent by the client
clientip: The IP address of the client.
clientip (str): The IP address of the client.
Returns:
defer.Deferred[dict]: the parameters for this request (which may
@@ -210,9 +193,7 @@ class AuthHandler(BaseHandler):
return self.checkers.keys()
@defer.inlineCallbacks
def check_auth(
self, flows: List[List[str]], clientdict: Dict[str, Any], clientip: str
):
def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@@ -227,14 +208,14 @@ class AuthHandler(BaseHandler):
decorator.
Args:
flows: A list of login flows. Each flow is an ordered list of
strings representing auth-types. At least one full
flow must be completed in order for auth to be successful.
flows (list): A list of login flows. Each flow is an ordered list of
strings representing auth-types. At least one full
flow must be completed in order for auth to be successful.
clientdict: The dictionary from the client root level, not the
'auth' key: this method prompts for auth if none is sent.
clientip: The IP address of the client.
clientip (str): The IP address of the client.
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
@@ -254,7 +235,7 @@ class AuthHandler(BaseHandler):
"""
authdict = None
sid = None # type: Optional[str]
sid = None
if clientdict and "auth" in clientdict:
authdict = clientdict["auth"]
del clientdict["auth"]
@@ -287,9 +268,9 @@ class AuthHandler(BaseHandler):
creds = session["creds"]
# check auth type currently being presented
errordict = {} # type: Dict[str, Any]
errordict = {}
if "type" in authdict:
login_type = authdict["type"] # type: str
login_type = authdict["type"]
try:
result = yield self._check_auth_dict(authdict, clientip)
if result:
@@ -330,7 +311,7 @@ class AuthHandler(BaseHandler):
raise InteractiveAuthIncompleteError(ret)
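A minimal sketch of the completion rule the check_auth docstring above describes: auth succeeds once every stage of at least one advertised flow has been satisfied (the flow and stage data here are illustrative):

def flow_completed(flows, completed_stages):
    # True once some flow has had every one of its stages completed.
    return any(
        all(stage in completed_stages for stage in flow) for flow in flows
    )

flows = [["m.login.password"], ["m.login.recaptcha", "m.login.terms"]]
assert not flow_completed(flows, {"m.login.recaptcha"})
assert flow_completed(flows, {"m.login.recaptcha", "m.login.terms"})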
@defer.inlineCallbacks
def add_oob_auth(self, stagetype: str, authdict: Dict[str, Any], clientip: str):
def add_oob_auth(self, stagetype, authdict, clientip):
"""
Adds the result of out-of-band authentication into an existing auth
session. Currently used for adding the result of fallback auth.
@@ -352,7 +333,7 @@ class AuthHandler(BaseHandler):
return True
return False
def get_session_id(self, clientdict: Dict[str, Any]) -> Optional[str]:
def get_session_id(self, clientdict):
"""
Gets the session ID for a client given the client dictionary
@@ -360,7 +341,7 @@ class AuthHandler(BaseHandler):
clientdict: The dictionary sent by the client in the request
Returns:
The string session ID the client sent. If the client did
str|None: The string session ID the client sent. If the client did
not send a session ID, returns None.
"""
sid = None
@@ -370,42 +351,40 @@ class AuthHandler(BaseHandler):
sid = authdict["session"]
return sid
def set_session_data(self, session_id: str, key: str, value: Any) -> None:
def set_session_data(self, session_id, key, value):
"""
Store a key-value pair into the sessions data associated with this
request. This data is stored server-side and cannot be modified by
the client.
Args:
session_id: The ID of this session as returned from check_auth
key: The key to store the data under
value: The data to store
session_id (string): The ID of this session as returned from check_auth
key (string): The key to store the data under
value (any): The data to store
"""
sess = self._get_session_info(session_id)
sess.setdefault("serverdict", {})[key] = value
self._save_session(sess)
def get_session_data(
self, session_id: str, key: str, default: Optional[Any] = None
) -> Any:
def get_session_data(self, session_id, key, default=None):
"""
Retrieve data stored with set_session_data
Args:
session_id: The ID of this session as returned from check_auth
key: The key to store the data under
default: Value to return if the key has not been set
session_id (string): The ID of this session as returned from check_auth
key (string): The key to store the data under
default (any): Value to return if the key has not been set
"""
sess = self._get_session_info(session_id)
return sess.setdefault("serverdict", {}).get(key, default)
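A stripped-down, dictionary-backed sketch of the set_session_data/get_session_data pair above (no persistence or expiry, which the real handler notes as a TODO):

sessions = {}  # session_id -> session dict, in-memory only

def set_session_data(session_id, key, value):
    sess = sessions.setdefault(session_id, {"id": session_id})
    sess.setdefault("serverdict", {})[key] = value

def get_session_data(session_id, key, default=None):
    sess = sessions.get(session_id, {})
    return sess.get("serverdict", {}).get(key, default)

set_session_data("s1", "ui_auth", {"stage": "m.login.terms"})
assert get_session_data("s1", "ui_auth")["stage"] == "m.login.terms"
assert get_session_data("s1", "missing", 42) == 42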
@defer.inlineCallbacks
def _check_auth_dict(self, authdict: Dict[str, Any], clientip: str):
def _check_auth_dict(self, authdict, clientip):
"""Attempt to validate the auth dict provided by a client
Args:
authdict: auth dict provided by the client
clientip: IP address of the client
authdict (object): auth dict provided by the client
clientip (str): IP address of the client
Returns:
Deferred: result of the stage verification.
@@ -431,10 +410,10 @@ class AuthHandler(BaseHandler):
(canonical_id, callback) = yield self.validate_login(user_id, authdict)
return canonical_id
def _get_params_recaptcha(self) -> dict:
def _get_params_recaptcha(self):
return {"public_key": self.hs.config.recaptcha_public_key}
def _get_params_terms(self) -> dict:
def _get_params_terms(self):
return {
"policies": {
"privacy_policy": {
@@ -451,9 +430,7 @@ class AuthHandler(BaseHandler):
}
}
def _auth_dict_for_flows(
self, flows: List[List[str]], session: Dict[str, Any]
) -> Dict[str, Any]:
def _auth_dict_for_flows(self, flows, session):
public_flows = []
for f in flows:
public_flows.append(f)
@@ -463,7 +440,7 @@ class AuthHandler(BaseHandler):
LoginType.TERMS: self._get_params_terms,
}
params = {} # type: Dict[str, Any]
params = {}
for f in public_flows:
for stage in f:
@@ -476,13 +453,7 @@ class AuthHandler(BaseHandler):
"params": params,
}
def _get_session_info(self, session_id: Optional[str]) -> dict:
"""
Gets or creates a session given a session ID.
The session can be used to track data across multiple requests, e.g. for
interactive authentication.
"""
def _get_session_info(self, session_id):
if session_id not in self.sessions:
session_id = None
@@ -495,9 +466,7 @@ class AuthHandler(BaseHandler):
return self.sessions[session_id]
@defer.inlineCallbacks
def get_access_token_for_user_id(
self, user_id: str, device_id: Optional[str], valid_until_ms: Optional[int]
):
def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms):
"""
Creates a new access token for the user with the given user ID.
@@ -507,11 +476,11 @@ class AuthHandler(BaseHandler):
The device will be recorded in the table if it is not there already.
Args:
user_id: canonical User ID
device_id: the device ID to associate with the tokens.
user_id (str): canonical User ID
device_id (str|None): the device ID to associate with the tokens.
None to leave the tokens unassociated with a device (deprecated:
we should always have a device ID)
valid_until_ms: when the token is valid until. None for
valid_until_ms (int|None): when the token is valid until. None for
no expiry.
Returns:
The access token for the user's session.
@@ -546,13 +515,13 @@ class AuthHandler(BaseHandler):
return access_token
@defer.inlineCallbacks
def check_user_exists(self, user_id: str):
def check_user_exists(self, user_id):
"""
Checks to see if a user with the given id exists. Will check case
insensitively, but return None if there are multiple inexact matches.
Args:
user_id: complete @user:id
user_id (unicode|bytes): complete @user:id
Returns:
defer.Deferred: (unicode) canonical_user_id, or None if zero or
@@ -567,7 +536,7 @@ class AuthHandler(BaseHandler):
return None
@defer.inlineCallbacks
def _find_user_id_and_pwd_hash(self, user_id: str):
def _find_user_id_and_pwd_hash(self, user_id):
"""Checks to see if a user with the given id exists. Will check case
insensitively, but will return None if there are multiple inexact
matches.
@@ -597,7 +566,7 @@ class AuthHandler(BaseHandler):
)
return result
def get_supported_login_types(self) -> Iterable[str]:
def get_supported_login_types(self):
"""Get a the login types supported for the /login API
By default this is just 'm.login.password' (unless password_enabled is
@@ -605,20 +574,20 @@ class AuthHandler(BaseHandler):
other login types.
Returns:
login types
Iterable[str]: login types
"""
return self._supported_login_types
@defer.inlineCallbacks
def validate_login(self, username: str, login_submission: Dict[str, Any]):
def validate_login(self, username, login_submission):
"""Authenticates the user for the /login API
Also used by the user-interactive auth flow to validate
m.login.password auth types.
Args:
username: username supplied by the user
login_submission: the whole of the login submission
username (str): username supplied by the user
login_submission (dict): the whole of the login submission
(including 'type' and other relevant fields)
Returns:
Deferred[str, func]: canonical user id, and optional callback
@@ -706,13 +675,13 @@ class AuthHandler(BaseHandler):
raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN)
@defer.inlineCallbacks
def check_password_provider_3pid(self, medium: str, address: str, password: str):
def check_password_provider_3pid(self, medium, address, password):
"""Check if a password provider is able to validate a thirdparty login
Args:
medium: The medium of the 3pid (ex. email).
address: The address of the 3pid (ex. jdoe@example.com).
password: The password of the user.
medium (str): The medium of the 3pid (ex. email).
address (str): The address of the 3pid (ex. jdoe@example.com).
password (str): The password of the user.
Returns:
Deferred[(str|None, func|None)]: A tuple of `(user_id,
@@ -740,15 +709,15 @@ class AuthHandler(BaseHandler):
return None, None
@defer.inlineCallbacks
def _check_local_password(self, user_id: str, password: str):
def _check_local_password(self, user_id, password):
"""Authenticate a user against the local password database.
user_id is checked case insensitively, but will return None if there are
multiple inexact matches.
Args:
user_id: complete @user:id
password: the provided password
user_id (unicode): complete @user:id
password (unicode): the provided password
Returns:
Deferred[unicode] the canonical_user_id, or Deferred[None] if
unknown user/bad password
@@ -771,7 +740,7 @@ class AuthHandler(BaseHandler):
return user_id
@defer.inlineCallbacks
def validate_short_term_login_token_and_get_user_id(self, login_token: str):
def validate_short_term_login_token_and_get_user_id(self, login_token):
auth_api = self.hs.get_auth()
user_id = None
try:
@@ -785,11 +754,11 @@ class AuthHandler(BaseHandler):
return user_id
@defer.inlineCallbacks
def delete_access_token(self, access_token: str):
def delete_access_token(self, access_token):
"""Invalidate a single access token
Args:
access_token: access token to be deleted
access_token (str): access token to be deleted
Returns:
Deferred
@@ -814,17 +783,15 @@ class AuthHandler(BaseHandler):
@defer.inlineCallbacks
def delete_access_tokens_for_user(
self,
user_id: str,
except_token_id: Optional[str] = None,
device_id: Optional[str] = None,
self, user_id, except_token_id=None, device_id=None
):
"""Invalidate access tokens belonging to a user
Args:
user_id: ID of user the tokens belong to
except_token_id: access_token ID which should *not* be deleted
device_id: ID of device the tokens are associated with.
user_id (str): ID of user the tokens belong to
except_token_id (str|None): access_token ID which should *not* be
deleted
device_id (str|None): ID of device the tokens are associated with.
If None, tokens associated with any device (or no device) will
be deleted
Returns:
@@ -848,15 +815,7 @@ class AuthHandler(BaseHandler):
)
@defer.inlineCallbacks
def add_threepid(self, user_id: str, medium: str, address: str, validated_at: int):
# check if medium has a valid value
if medium not in ["email", "msisdn"]:
raise SynapseError(
code=400,
msg=("'%s' is not a valid value for 'medium'" % (medium,)),
errcode=Codes.INVALID_PARAM,
)
def add_threepid(self, user_id, medium, address, validated_at):
# 'Canonicalise' email addresses down to lower case.
# We're now moving towards the homeserver being the entity that
# is responsible for validating threepids used for resetting passwords
@@ -874,20 +833,19 @@ class AuthHandler(BaseHandler):
)
@defer.inlineCallbacks
def delete_threepid(
self, user_id: str, medium: str, address: str, id_server: Optional[str] = None
):
def delete_threepid(self, user_id, medium, address, id_server=None):
"""Attempts to unbind the 3pid on the identity servers and deletes it
from the local database.
Args:
user_id: ID of user to remove the 3pid from.
medium: The medium of the 3pid being removed: "email" or "msisdn".
address: The 3pid address to remove.
id_server: Use the given identity server when unbinding
user_id (str)
medium (str)
address (str)
id_server (str|None): Use the given identity server when unbinding
any threepids. If None then will attempt to unbind using the
identity server specified when binding (if known).
Returns:
Deferred[bool]: Returns True if successfully unbound the 3pid on
the identity server, False if identity server doesn't support the
@@ -906,18 +864,17 @@ class AuthHandler(BaseHandler):
yield self.store.user_delete_threepid(user_id, medium, address)
return result
def _save_session(self, session: Dict[str, Any]) -> None:
"""Update the last used time on the session to now and add it back to the session store."""
def _save_session(self, session):
# TODO: Persistent storage
logger.debug("Saving session %s", session)
session["last_used"] = self.hs.get_clock().time_msec()
self.sessions[session["id"]] = session
def hash(self, password: str):
def hash(self, password):
"""Computes a secure hash of password.
Args:
password: Password to hash.
password (unicode): Password to hash.
Returns:
Deferred(unicode): Hashed password.
@@ -934,12 +891,12 @@ class AuthHandler(BaseHandler):
return defer_to_thread(self.hs.get_reactor(), _do_hash)
def validate_hash(self, password: str, stored_hash: bytes):
def validate_hash(self, password, stored_hash):
"""Validates that self.hash(password) == stored_hash.
Args:
password: Password to hash.
stored_hash: Expected hash value.
password (unicode): Password to hash.
stored_hash (bytes): Expected hash value.
Returns:
Deferred(bool): Whether self.hash(password) == stored_hash.
@@ -962,74 +919,13 @@ class AuthHandler(BaseHandler):
else:
return defer.succeed(False)
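For orientation, a synchronous bcrypt round-trip in the same shape as the hash()/validate_hash() pair above; the real methods run this work on a thread pool via defer_to_thread, and the pepper here is a placeholder, not Synapse's config value:

import bcrypt

pepper = b"pepper"  # placeholder for the configured password pepper

hashed = bcrypt.hashpw(b"s3kr1t" + pepper, bcrypt.gensalt(rounds=12))
assert bcrypt.checkpw(b"s3kr1t" + pepper, hashed)
assert not bcrypt.checkpw(b"wrong" + pepper, hashed)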
def complete_sso_login(
self,
registered_user_id: str,
request: SynapseRequest,
client_redirect_url: str,
):
"""Having figured out a mxid for this user, complete the HTTP request
Args:
registered_user_id: The registered user ID to complete SSO login for.
request: The request to complete.
client_redirect_url: The URL to which to redirect the user at the end of the
process.
"""
# Create a login token
login_token = self.macaroon_gen.generate_short_term_login_token(
registered_user_id
)
# Append the login token to the original redirect URL (i.e. with its query
# parameters kept intact) to build the URL to which the template needs to
# redirect the users once they have clicked on the confirmation link.
redirect_url = self.add_query_param_to_url(
client_redirect_url, "loginToken", login_token
)
# if the client is whitelisted, we can redirect straight to it
if client_redirect_url.startswith(self._whitelisted_sso_clients):
request.redirect(redirect_url)
finish_request(request)
return
# Otherwise, serve the redirect confirmation page.
# Remove the query parameters from the redirect URL to get a shorter version of
# it. This is only to display a human-readable URL in the template, but not the
# URL we redirect users to.
redirect_url_no_params = client_redirect_url.split("?")[0]
html = self._sso_redirect_confirm_template.render(
display_url=redirect_url_no_params,
redirect_url=redirect_url,
server_name=self._server_name,
).encode("utf-8")
request.setResponseCode(200)
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
request.setHeader(b"Content-Length", b"%d" % (len(html),))
request.write(html)
finish_request(request)
@staticmethod
def add_query_param_to_url(url: str, param_name: str, param: Any):
url_parts = list(urllib.parse.urlparse(url))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({param_name: param})
url_parts[4] = urllib.parse.urlencode(query)
return urllib.parse.urlunparse(url_parts)
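A quick usage sketch of the add_query_param_to_url logic above, with an illustrative client URL; the existing query string is preserved and the new parameter appended:

import urllib.parse

url_parts = list(urllib.parse.urlparse("https://client.example/?foo=1"))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({"loginToken": "abc123"})
url_parts[4] = urllib.parse.urlencode(query)
print(urllib.parse.urlunparse(url_parts))
# https://client.example/?foo=1&loginToken=abc123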
@attr.s
class MacaroonGenerator(object):
hs = attr.ib()
def generate_access_token(
self, user_id: str, extra_caveats: Optional[List[str]] = None
) -> str:
def generate_access_token(self, user_id, extra_caveats=None):
extra_caveats = extra_caveats or []
macaroon = self._generate_base_macaroon(user_id)
macaroon.add_first_party_caveat("type = access")
@@ -1042,9 +938,16 @@ class MacaroonGenerator(object):
macaroon.add_first_party_caveat(caveat)
return macaroon.serialize()
def generate_short_term_login_token(
self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
) -> str:
def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
"""
Args:
user_id (unicode):
duration_in_ms (int):
Returns:
unicode
"""
macaroon = self._generate_base_macaroon(user_id)
macaroon.add_first_party_caveat("type = login")
now = self.hs.get_clock().time_msec()
@@ -1052,12 +955,12 @@ class MacaroonGenerator(object):
macaroon.add_first_party_caveat("time < %d" % (expiry,))
return macaroon.serialize()
def generate_delete_pusher_token(self, user_id: str) -> str:
def generate_delete_pusher_token(self, user_id):
macaroon = self._generate_base_macaroon(user_id)
macaroon.add_first_party_caveat("type = delete_pusher")
return macaroon.serialize()
def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon:
def _generate_base_macaroon(self, user_id):
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",

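The MacaroonGenerator methods above all follow the same pymacaroons pattern: mint a base macaroon, then narrow it with first-party caveats. A standalone sketch with placeholder location/key values, including the verification side (which the diff does not show):

import time
import pymacaroons

macaroon = pymacaroons.Macaroon(
    location="example.com", identifier="key", key="macaroon-secret"
)
macaroon.add_first_party_caveat("type = login")
expiry = int(time.time() * 1000) + 2 * 60 * 1000
macaroon.add_first_party_caveat("time < %d" % (expiry,))
token = macaroon.serialize()

v = pymacaroons.Verifier()
v.satisfy_exact("type = login")
# A real verifier would parse the timestamp and compare it to the clock.
v.satisfy_general(lambda caveat: caveat.startswith("time < "))
assert v.verify(pymacaroons.Macaroon.deserialize(token), "macaroon-secret")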
View File

@@ -26,7 +26,6 @@ from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
@@ -40,8 +39,6 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
MAX_DEVICE_DISPLAY_NAME_LEN = 100
class DeviceWorkerHandler(BaseHandler):
def __init__(self, hs):
@@ -407,18 +404,9 @@ class DeviceHandler(DeviceWorkerHandler):
defer.Deferred:
"""
# Reject a new displayname which is too long.
new_display_name = content.get("display_name")
if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN:
raise SynapseError(
400,
"Device display name is too long (max %i)"
% (MAX_DEVICE_DISPLAY_NAME_LEN,),
)
try:
yield self.store.update_device(
user_id, device_id, new_display_name=new_display_name
user_id, device_id, new_display_name=content.get("display_name")
)
yield self.notify_device_update(user_id, [device_id])
except errors.StoreError as e:
@@ -742,6 +730,6 @@ class DeviceListUpdater(object):
# We clobber the seen updates since we've re-synced from a given
# point.
self._seen_updates[user_id] = {stream_id}
self._seen_updates[user_id] = set([stream_id])
defer.returnValue(result)

View File

@@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
from typing import Iterable, List, Optional
from twisted.internet import defer
@@ -28,8 +28,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
from synapse.appservice import ApplicationService
from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id
from synapse.types import RoomAlias, UserID, get_domain_from_id
from ._base import BaseHandler
@@ -56,13 +55,7 @@ class DirectoryHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
@defer.inlineCallbacks
def _create_association(
self,
room_alias: RoomAlias,
room_id: str,
servers: Optional[Iterable[str]] = None,
creator: Optional[str] = None,
):
def _create_association(self, room_alias, room_id, servers=None, creator=None):
# general association creation for both human users and app services
for wchar in string.whitespace:
@@ -77,7 +70,7 @@ class DirectoryHandler(BaseHandler):
# TODO(erikj): Check if there is a current association.
if not servers:
users = yield self.state.get_current_users_in_room(room_id)
servers = {get_domain_from_id(u) for u in users}
servers = set(get_domain_from_id(u) for u in users)
if not servers:
raise SynapseError(400, "Failed to get server list")
@@ -89,20 +82,23 @@ class DirectoryHandler(BaseHandler):
@defer.inlineCallbacks
def create_association(
self,
requester: Requester,
room_alias: RoomAlias,
room_id: str,
servers: Optional[List[str]] = None,
check_membership: bool = True,
requester,
room_alias,
room_id,
servers=None,
send_event=True,
check_membership=True,
):
"""Attempt to create a new alias
Args:
requester
room_alias
room_id
servers: Iterable of servers that other servers should try and join via
check_membership: Whether to check if the user is in the room
requester (Requester)
room_alias (RoomAlias)
room_id (str)
servers (list[str]|None): List of servers that other servers
should try and join via
send_event (bool): Whether to send an updated m.room.aliases event
check_membership (bool): Whether to check if the user is in the room
before the alias can be set (if the server's config requires it).
Returns:
@@ -154,17 +150,27 @@ class DirectoryHandler(BaseHandler):
)
yield self._create_association(room_alias, room_id, servers, creator=user_id)
if send_event:
try:
yield self.send_room_alias_update_event(requester, room_id)
except AuthError as e:
# sending the aliases event may fail due to the user not having
# permission in the room; this is permitted.
logger.info("Skipping updating aliases event due to auth error %s", e)
@defer.inlineCallbacks
def delete_association(self, requester: Requester, room_alias: RoomAlias):
def delete_association(self, requester, room_alias, send_event=True):
"""Remove an alias from the directory
(this is only meant for human users; AS users should call
delete_appservice_association)
Args:
requester
room_alias
requester (Requester):
room_alias (RoomAlias):
send_event (bool): Whether to send an updated m.room.aliases event.
Note that, if we delete the canonical alias, we will always attempt
to send an m.room.canonical_alias event
Returns:
Deferred[unicode]: room id that the alias used to point to
@@ -200,16 +206,19 @@ class DirectoryHandler(BaseHandler):
room_id = yield self._delete_association(room_alias)
try:
yield self._update_canonical_alias(requester, user_id, room_id, room_alias)
if send_event:
yield self.send_room_alias_update_event(requester, room_id)
yield self._update_canonical_alias(
requester, requester.user.to_string(), room_id, room_alias
)
except AuthError as e:
logger.info("Failed to update alias events: %s", e)
return room_id
@defer.inlineCallbacks
def delete_appservice_association(
self, service: ApplicationService, room_alias: RoomAlias
):
def delete_appservice_association(self, service, room_alias):
if not service.is_interested_in_alias(room_alias.to_string()):
raise SynapseError(
400,
@@ -219,7 +228,7 @@ class DirectoryHandler(BaseHandler):
yield self._delete_association(room_alias)
@defer.inlineCallbacks
def _delete_association(self, room_alias: RoomAlias):
def _delete_association(self, room_alias):
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room alias must be local")
@@ -228,7 +237,7 @@ class DirectoryHandler(BaseHandler):
return room_id
@defer.inlineCallbacks
def get_association(self, room_alias: RoomAlias):
def get_association(self, room_alias):
room_id = None
if self.hs.is_mine(room_alias):
result = yield self.get_association_from_room_alias(room_alias)
@@ -264,7 +273,7 @@ class DirectoryHandler(BaseHandler):
)
users = yield self.state.get_current_users_in_room(room_id)
extra_servers = {get_domain_from_id(u) for u in users}
extra_servers = set(get_domain_from_id(u) for u in users)
servers = set(extra_servers) | set(servers)
# If this server is in the list of servers, return it first.
@@ -293,58 +302,45 @@ class DirectoryHandler(BaseHandler):
)
@defer.inlineCallbacks
def _update_canonical_alias(
self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias
):
"""
Send an updated canonical alias event if the removed alias was set as
the canonical alias or listed in the alt_aliases field.
"""
def send_room_alias_update_event(self, requester, room_id):
aliases = yield self.store.get_aliases_for_room(room_id)
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
"sender": requester.user.to_string(),
"content": {"aliases": aliases},
},
ratelimit=False,
)
@defer.inlineCallbacks
def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
alias_event = yield self.state.get_current_state(
room_id, EventTypes.CanonicalAlias, ""
)
# There is no canonical alias, nothing to do.
if not alias_event:
alias_str = room_alias.to_string()
if not alias_event or alias_event.content.get("alias", "") != alias_str:
return
# Obtain a mutable version of the event content.
content = dict(alias_event.content)
send_update = False
# Remove the alias property if it matches the removed alias.
alias_str = room_alias.to_string()
if alias_event.content.get("alias", "") == alias_str:
send_update = True
content.pop("alias", "")
# Filter the alt_aliases property for the removed alias. Note that the
# value is not modified if alt_aliases is of an unexpected form.
alt_aliases = content.get("alt_aliases")
if isinstance(alt_aliases, (list, tuple)) and alias_str in alt_aliases:
send_update = True
alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
if alt_aliases:
content["alt_aliases"] = alt_aliases
else:
del content["alt_aliases"]
if send_update:
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": room_id,
"sender": user_id,
"content": content,
},
ratelimit=False,
)
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": room_id,
"sender": user_id,
"content": {},
},
ratelimit=False,
)
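The newer _update_canonical_alias above is essentially a pure transformation of the event content followed by a conditional send. A side-effect-free sketch of just the content-editing part (the function name is illustrative):

def strip_alias_from_content(content, alias_str):
    """Return (new_content, changed) with alias_str removed."""
    content = dict(content)  # work on a mutable copy of the event content
    changed = False
    if content.get("alias", "") == alias_str:
        content.pop("alias", "")
        changed = True
    alt_aliases = content.get("alt_aliases")
    if isinstance(alt_aliases, (list, tuple)) and alias_str in alt_aliases:
        remaining = [a for a in alt_aliases if a != alias_str]
        if remaining:
            content["alt_aliases"] = remaining
        else:
            del content["alt_aliases"]
        changed = True
    return content, changed

content, changed = strip_alias_from_content(
    {"alias": "#a:hs", "alt_aliases": ["#a:hs", "#b:hs"]}, "#a:hs"
)
assert changed and content == {"alt_aliases": ["#b:hs"]}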
@defer.inlineCallbacks
def get_association_from_room_alias(self, room_alias: RoomAlias):
def get_association_from_room_alias(self, room_alias):
result = yield self.store.get_association_from_room_alias(room_alias)
if not result:
# Query AS to see if it exists
@@ -352,7 +348,7 @@ class DirectoryHandler(BaseHandler):
result = yield as_handler.query_room_alias_exists(room_alias)
return result
def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None):
def can_modify_alias(self, alias, user_id=None):
# Any application service "interested" in an alias they are regexing on
# can modify the alias.
# Users can only modify the alias if ALL the interested services have
@@ -373,42 +369,22 @@ class DirectoryHandler(BaseHandler):
return defer.succeed(True)
@defer.inlineCallbacks
def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
"""Determine whether a user can delete an alias.
One of the following must be true:
1. The user created the alias.
2. The user is a server administrator.
3. The user has a power-level sufficient to send a canonical alias event
for the current room.
"""
def _user_can_delete_alias(self, alias, user_id):
creator = yield self.store.get_room_alias_creator(alias.to_string())
if creator is not None and creator == user_id:
return True
# Resolve the alias to the corresponding room.
room_mapping = yield self.get_association(alias)
room_id = room_mapping["room_id"]
if not room_id:
return False
res = yield self.auth.check_can_change_room_list(
room_id, UserID.from_string(user_id)
)
return res
is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
return is_admin
@defer.inlineCallbacks
def edit_published_room_list(
self, requester: Requester, room_id: str, visibility: str
):
def edit_published_room_list(self, requester, room_id, visibility):
"""Edit the entry of the room in the published room list.
requester
room_id
visibility: "public" or "private"
room_id (str)
visibility (str): "public" or "private"
"""
user_id = requester.user.to_string()
@@ -433,15 +409,7 @@ class DirectoryHandler(BaseHandler):
if room is None:
raise SynapseError(400, "Unknown room")
can_change_room_list = yield self.auth.check_can_change_room_list(
room_id, requester.user
)
if not can_change_room_list:
raise AuthError(
403,
"This server requires you to be a moderator in the room to"
" edit its room list entry",
)
yield self.auth.check_can_change_room_list(room_id, requester.user)
making_public = visibility == "public"
if making_public:
@@ -462,16 +430,16 @@ class DirectoryHandler(BaseHandler):
@defer.inlineCallbacks
def edit_published_appservice_room_list(
self, appservice_id: str, network_id: str, room_id: str, visibility: str
self, appservice_id, network_id, room_id, visibility
):
"""Add or remove a room from the appservice/network specific public
room list.
Args:
appservice_id: ID of the appservice that owns the list
network_id: The ID of the network the list is associated with
room_id
visibility: either "public" or "private"
appservice_id (str): ID of the appservice that owns the list
network_id (str): The ID of the network the list is associated with
room_id (str)
visibility (str): either "public" or "private"
"""
if visibility not in ["public", "private"]:
raise SynapseError(400, "Invalid visibility setting")
@@ -479,19 +447,3 @@ class DirectoryHandler(BaseHandler):
yield self.store.set_room_is_public_appservice(
room_id, appservice_id, network_id, visibility == "public"
)
async def get_aliases_for_room(
self, requester: Requester, room_id: str
) -> List[str]:
"""
Get a list of the aliases that currently point to this room on this server
"""
# allow access to server admins and current members of the room
is_admin = await self.auth.is_server_admin(requester.user)
if not is_admin:
await self.auth.check_user_in_room_or_world_readable(
room_id, requester.user.to_string()
)
aliases = await self.store.get_aliases_for_room(room_id)
return aliases

View File

@@ -207,13 +207,6 @@ class E2eRoomKeysHandler(object):
changed = False # if anything has changed, we need to update the etag
for room_id, room in iteritems(room_keys["rooms"]):
for session_id, room_key in iteritems(room["sessions"]):
if not isinstance(room_key["is_verified"], bool):
msg = (
"is_verified must be a boolean in keys for session %s in"
"room %s" % (session_id, room_id)
)
raise SynapseError(400, msg, Codes.INVALID_PARAM)
log_kv(
{
"message": "Trying to upload room key",

File diff suppressed because it is too large

View File

@@ -18,7 +18,7 @@ import logging
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import make_deferred_yieldable, run_in_background
@@ -274,11 +274,8 @@ class InitialSyncHandler(BaseHandler):
user_id = requester.user.to_string()
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True,
membership, member_event_id = await self._check_in_room_or_world_readable(
room_id, user_id
)
is_peeking = member_event_id is None
@@ -436,3 +433,25 @@ class InitialSyncHandler(BaseHandler):
ret["membership"] = membership
return ret
async def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = await self.auth.check_user_was_in_room(room_id, user_id)
return member_event.membership, member_event.event_id
except AuthError:
visibility = await self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility
and visibility.content["history_visibility"] == "world_readable"
):
return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)

View File

@@ -99,9 +99,7 @@ class MessageHandler(object):
(
membership,
membership_event_id,
) = yield self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
if membership == Membership.JOIN:
data = yield self.state.get_current_state(room_id, event_type, state_key)
@@ -160,7 +158,7 @@ class MessageHandler(object):
raise NotFoundError("Can't find event for token %s" % (at_token,))
visible_events = yield filter_events_for_client(
self.storage, user_id, last_events, filter_send_to_client=False
self.storage, user_id, last_events, apply_retention_policies=False
)
event = last_events[0]
@@ -179,9 +177,7 @@ class MessageHandler(object):
(
membership,
membership_event_id,
) = yield self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
if membership == Membership.JOIN:
state_ids = yield self.store.get_filtered_current_state_ids(
@@ -220,8 +216,8 @@ class MessageHandler(object):
if not requester.app_service:
# We check AS auth after fetching the room membership, as it
# requires us to pull out all joined members anyway.
membership, _ = yield self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
membership, _ = yield self.auth.check_in_room_or_world_readable(
room_id, user_id
)
if membership != Membership.JOIN:
raise NotImplementedError(
@@ -233,7 +229,7 @@ class MessageHandler(object):
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
# is a user in the room that the AS is "interested in"
if requester.app_service and user_id not in users_with_profile:
if False and requester.app_service and user_id not in users_with_profile:
for uid in users_with_profile:
if requester.app_service.is_interested_in_user(uid):
break
@@ -888,60 +884,19 @@ class EventCreationHandler(object):
yield self.base_handler.maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
# Validate a newly added alias or newly added alt_aliases.
original_alias = None
original_alt_aliases = set()
original_event_id = event.unsigned.get("replaces_state")
if original_event_id:
original_event = yield self.store.get_event(original_event_id)
if original_event:
original_alias = original_event.content.get("alias", None)
original_alt_aliases = original_event.content.get("alt_aliases", [])
# Check the alias is currently valid (if it has changed).
# Check the alias is actually valid (at this time at least)
room_alias_str = event.content.get("alias", None)
directory_handler = self.hs.get_handlers().directory_handler
if room_alias_str and room_alias_str != original_alias:
if room_alias_str:
room_alias = RoomAlias.from_string(room_alias_str)
directory_handler = self.hs.get_handlers().directory_handler
mapping = yield directory_handler.get_association(room_alias)
if mapping["room_id"] != event.room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (room_alias_str,),
Codes.BAD_ALIAS,
)
# Check that alt_aliases is of the proper form.
alt_aliases = event.content.get("alt_aliases", [])
if not isinstance(alt_aliases, (list, tuple)):
raise SynapseError(
400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
)
# If the old version of alt_aliases is of an unknown form,
# completely replace it.
if not isinstance(original_alt_aliases, (list, tuple)):
original_alt_aliases = []
# Check that each alias is currently valid.
new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
if new_alt_aliases:
for alias_str in new_alt_aliases:
room_alias = RoomAlias.from_string(alias_str)
mapping = yield directory_handler.get_association(room_alias)
if mapping["room_id"] != event.room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room"
% (room_alias_str,),
Codes.BAD_ALIAS,
)
federation_handler = self.hs.get_handlers().federation_handler
if event.type == EventTypes.Member:
@@ -977,9 +932,10 @@ class EventCreationHandler(object):
# way? If we have been invited by a remote server, we need
# to get them to sign the event.
returned_invite = yield defer.ensureDeferred(
federation_handler.send_invite(invitee.domain, event)
returned_invite = yield federation_handler.send_invite(
invitee.domain, event
)
event.unsigned.pop("room_state", None)
# TODO: Make sure the signatures actually are correct.
@@ -1057,10 +1013,11 @@ class EventCreationHandler(object):
# matters as sometimes presence code can take a while.
run_in_background(self._bump_active_time, requester.user)
async def _bump_active_time(self, user):
@defer.inlineCallbacks
def _bump_active_time(self, user):
try:
presence = self.hs.get_presence_handler()
await presence.bump_presence_active_time(user)
yield presence.bump_presence_active_time(user)
except Exception:
logger.exception("Error bumping presence active time")

View File

@@ -133,7 +133,7 @@ class PaginationHandler(object):
include_null = False
logger.info(
"[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)",
"[purge] Running purge job for %d < max_lifetime <= %d (include NULLs = %s)",
min_ms,
max_ms,
include_null,
@@ -335,9 +335,7 @@ class PaginationHandler(object):
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, user_id, allow_departed_users=True
)
) = await self.auth.check_in_room_or_world_readable(room_id, user_id)
if source_config.direction == "b":
# if we're going backwards, we might need to backfill. This

View File

@@ -24,12 +24,11 @@ The methods that define policy are:
import logging
from contextlib import contextmanager
from typing import Dict, List, Set
from typing import Dict, Set
from six import iteritems, itervalues
from prometheus_client import Counter
from typing_extensions import ContextManager
from twisted.internet import defer
@@ -43,14 +42,10 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import cached
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
MYPY = False
if MYPY:
import synapse.server
logger = logging.getLogger(__name__)
@@ -102,6 +97,7 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
class PresenceHandler(object):
def __init__(self, hs: "synapse.server.HomeServer"):
self.hs = hs
self.is_mine = hs.is_mine
self.is_mine_id = hs.is_mine_id
self.server_name = hs.hostname
self.clock = hs.get_clock()
@@ -154,7 +150,7 @@ class PresenceHandler(object):
# Set of users who have presence in the `user_to_current_state` that
# have not yet been persisted
self.unpersisted_users_changes = set() # type: Set[str]
self.unpersisted_users_changes = set()
hs.get_reactor().addSystemEventTrigger(
"before",
@@ -164,11 +160,12 @@ class PresenceHandler(object):
self._on_shutdown,
)
self.serial_to_user = {}
self._next_serial = 1
# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self.user_to_num_current_syncs = {} # type: Dict[str, int]
self.user_to_num_current_syncs = {}
# Keeps track of the number of *ongoing* syncs on other processes.
# While any sync is ongoing on another process the user will never
@@ -216,7 +213,8 @@ class PresenceHandler(object):
self._event_pos = self.store.get_current_events_token()
self._event_processing = False
async def _on_shutdown(self):
@defer.inlineCallbacks
def _on_shutdown(self):
"""Gets called when shutting down. This lets us persist any updates that
we haven't yet persisted, e.g. updates that only change some internal
timers. This allows changes to persist across startup without having to
@@ -237,7 +235,7 @@ class PresenceHandler(object):
if self.unpersisted_users_changes:
await self.store.update_presence(
yield self.store.update_presence(
[
self.user_to_current_state[user_id]
for user_id in self.unpersisted_users_changes
@@ -245,7 +243,8 @@ class PresenceHandler(object):
)
logger.info("Finished _on_shutdown")
async def _persist_unpersisted_changes(self):
@defer.inlineCallbacks
def _persist_unpersisted_changes(self):
"""We periodically persist the unpersisted changes, as otherwise they
may stack up and slow down shutdown times.
"""
@@ -254,11 +253,12 @@ class PresenceHandler(object):
if unpersisted:
logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
await self.store.update_presence(
yield self.store.update_presence(
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
async def _update_states(self, new_states):
@defer.inlineCallbacks
def _update_states(self, new_states):
"""Updates presence of users. Sets the appropriate timeouts. Pokes
the notifier and federation if and only if the changed presence state
should be sent to clients/servers.
@@ -267,7 +267,7 @@ class PresenceHandler(object):
with Measure(self.clock, "presence_update_states"):
# NOTE: We purposefully don't await between now and when we've
# NOTE: We purposefully don't yield between now and when we've
# calculated what we want to do with the new states, to avoid races.
to_notify = {} # Changes we want to notify everyone about
@@ -311,9 +311,9 @@ class PresenceHandler(object):
if to_notify:
notified_presence_counter.inc(len(to_notify))
await self._persist_and_notify(list(to_notify.values()))
yield self._persist_and_notify(list(to_notify.values()))
self.unpersisted_users_changes |= {s.user_id for s in new_states}
self.unpersisted_users_changes |= set(s.user_id for s in new_states)
self.unpersisted_users_changes -= set(to_notify.keys())
to_federation_ping = {
@@ -326,7 +326,7 @@ class PresenceHandler(object):
self._push_to_remotes(to_federation_ping.values())
async def _handle_timeouts(self):
def _handle_timeouts(self):
"""Checks the presence of users that have timed out and updates as
appropriate.
"""
@@ -368,9 +368,10 @@ class PresenceHandler(object):
now=now,
)
return await self._update_states(changes)
return self._update_states(changes)
async def bump_presence_active_time(self, user):
@defer.inlineCallbacks
def bump_presence_active_time(self, user):
"""We've seen the user do something that indicates they're interacting
with the app.
"""
@@ -382,17 +383,16 @@ class PresenceHandler(object):
bump_active_time_counter.inc()
prev_state = await self.current_state_for_user(user_id)
prev_state = yield self.current_state_for_user(user_id)
new_fields = {"last_active_ts": self.clock.time_msec()}
if prev_state.state == PresenceState.UNAVAILABLE:
new_fields["state"] = PresenceState.ONLINE
await self._update_states([prev_state.copy_and_replace(**new_fields)])
yield self._update_states([prev_state.copy_and_replace(**new_fields)])
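Most of this file's churn is the same mechanical translation between Twisted's two coroutine styles. An illustrative pair (presence_handler is a stand-in whose method returns a Deferred, as current_state_for_user does above):

from twisted.internet import defer

@defer.inlineCallbacks
def bump_old(presence_handler, user_id):
    # yield resolves the Deferred inside an inlineCallbacks generator.
    prev_state = yield presence_handler.current_state_for_user(user_id)
    return prev_state

async def bump_new(presence_handler, user_id):
    # await does the same in a native coroutine; Deferreds are awaitable.
    prev_state = await presence_handler.current_state_for_user(user_id)
    return prev_state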
async def user_syncing(
self, user_id: str, affect_presence: bool = True
) -> ContextManager[None]:
@defer.inlineCallbacks
def user_syncing(self, user_id, affect_presence=True):
"""Returns a context manager that should surround any stream requests
from the user.
@@ -415,11 +415,11 @@ class PresenceHandler(object):
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
prev_state = await self.current_state_for_user(user_id)
prev_state = yield self.current_state_for_user(user_id)
if prev_state.state == PresenceState.OFFLINE:
# If they're currently offline then bring them online, otherwise
# just update the last sync times.
await self._update_states(
yield self._update_states(
[
prev_state.copy_and_replace(
state=PresenceState.ONLINE,
@@ -429,7 +429,7 @@ class PresenceHandler(object):
]
)
else:
await self._update_states(
yield self._update_states(
[
prev_state.copy_and_replace(
last_user_sync_ts=self.clock.time_msec()
@@ -437,12 +437,13 @@ class PresenceHandler(object):
]
)
async def _end():
@defer.inlineCallbacks
def _end():
try:
self.user_to_num_current_syncs[user_id] -= 1
prev_state = await self.current_state_for_user(user_id)
await self._update_states(
prev_state = yield self.current_state_for_user(user_id)
yield self._update_states(
[
prev_state.copy_and_replace(
last_user_sync_ts=self.clock.time_msec()
@@ -479,7 +480,8 @@ class PresenceHandler(object):
else:
return set()
async def update_external_syncs_row(
@defer.inlineCallbacks
def update_external_syncs_row(
self, process_id, user_id, is_syncing, sync_time_msec
):
"""Update the syncing users for an external process as a delta.
@@ -492,8 +494,8 @@ class PresenceHandler(object):
is_syncing (bool): Whether or not the user is now syncing
sync_time_msec (int): Time in ms when the user was last syncing
"""
with (await self.external_sync_linearizer.queue(process_id)):
prev_state = await self.current_state_for_user(user_id)
with (yield self.external_sync_linearizer.queue(process_id)):
prev_state = yield self.current_state_for_user(user_id)
process_presence = self.external_process_to_current_syncs.setdefault(
process_id, set()
@@ -523,24 +525,25 @@ class PresenceHandler(object):
process_presence.discard(user_id)
if updates:
await self._update_states(updates)
yield self._update_states(updates)
self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
async def update_external_syncs_clear(self, process_id):
@defer.inlineCallbacks
def update_external_syncs_clear(self, process_id):
"""Marks all users that had been marked as syncing by a given process
as offline.
Used when the process has stopped/disappeared.
"""
with (await self.external_sync_linearizer.queue(process_id)):
with (yield self.external_sync_linearizer.queue(process_id)):
process_presence = self.external_process_to_current_syncs.pop(
process_id, set()
)
prev_states = await self.current_state_for_users(process_presence)
prev_states = yield self.current_state_for_users(process_presence)
time_now_ms = self.clock.time_msec()
await self._update_states(
yield self._update_states(
[
prev_state.copy_and_replace(last_user_sync_ts=time_now_ms)
for prev_state in itervalues(prev_states)
@@ -548,13 +551,15 @@ class PresenceHandler(object):
)
self.external_process_last_updated_ms.pop(process_id, None)
async def current_state_for_user(self, user_id):
@defer.inlineCallbacks
def current_state_for_user(self, user_id):
"""Get the current presence state for a user.
"""
res = await self.current_state_for_users([user_id])
res = yield self.current_state_for_users([user_id])
return res[user_id]
async def current_state_for_users(self, user_ids):
@defer.inlineCallbacks
def current_state_for_users(self, user_ids):
"""Get the current presence state for multiple users.
Returns:
@@ -569,7 +574,7 @@ class PresenceHandler(object):
if missing:
# There are things not in our in-memory cache. Let's pull them out of
# the database.
res = await self.store.get_presence_for_users(missing)
res = yield self.store.get_presence_for_users(missing)
states.update(res)
missing = [user_id for user_id, state in iteritems(states) if not state]
@@ -582,13 +587,14 @@ class PresenceHandler(object):
return states
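current_state_for_users above is a read-through lookup: serve from the in-memory map first, then fill the misses from the database in one batch. A condensed sketch (cache is a plain dict standing in for the handler's user_to_current_state map):

async def current_state_for_users(cache, store, user_ids):
    states = {user_id: cache.get(user_id) for user_id in user_ids}
    missing = [user_id for user_id, state in states.items() if state is None]
    if missing:
        # Only the cache misses touch the database, in a single batch.
        states.update(await store.get_presence_for_users(missing))
    return states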
async def _persist_and_notify(self, states):
@defer.inlineCallbacks
def _persist_and_notify(self, states):
"""Persist states in the database, poke the notifier and send to
interested remote servers
"""
stream_id, max_token = await self.store.update_presence(states)
stream_id, max_token = yield self.store.update_presence(states)
parties = await get_interested_parties(self.store, states)
parties = yield get_interested_parties(self.store, states)
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
@@ -600,8 +606,9 @@ class PresenceHandler(object):
self._push_to_remotes(states)
async def notify_for_states(self, state, stream_id):
parties = await get_interested_parties(self.store, [state])
@defer.inlineCallbacks
def notify_for_states(self, state, stream_id):
parties = yield get_interested_parties(self.store, [state])
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
@@ -619,7 +626,8 @@ class PresenceHandler(object):
"""
self.federation.send_presence(states)
async def incoming_presence(self, origin, content):
@defer.inlineCallbacks
def incoming_presence(self, origin, content):
"""Called when we receive a `m.presence` EDU from a remote server.
"""
now = self.clock.time_msec()
@@ -662,19 +670,21 @@ class PresenceHandler(object):
new_fields["status_msg"] = push.get("status_msg", None)
new_fields["currently_active"] = push.get("currently_active", False)
prev_state = await self.current_state_for_user(user_id)
prev_state = yield self.current_state_for_user(user_id)
updates.append(prev_state.copy_and_replace(**new_fields))
if updates:
federation_presence_counter.inc(len(updates))
await self._update_states(updates)
yield self._update_states(updates)
async def get_state(self, target_user, as_event=False):
results = await self.get_states([target_user.to_string()], as_event=as_event)
@defer.inlineCallbacks
def get_state(self, target_user, as_event=False):
results = yield self.get_states([target_user.to_string()], as_event=as_event)
return results[0]
async def get_states(self, target_user_ids, as_event=False):
@defer.inlineCallbacks
def get_states(self, target_user_ids, as_event=False):
"""Get the presence state for users.
Args:
@@ -685,10 +695,10 @@ class PresenceHandler(object):
list
"""
updates = await self.current_state_for_users(target_user_ids)
updates = yield self.current_state_for_users(target_user_ids)
updates = list(updates.values())
for user_id in set(target_user_ids) - {u.user_id for u in updates}:
for user_id in set(target_user_ids) - set(u.user_id for u in updates):
updates.append(UserPresenceState.default(user_id))
now = self.clock.time_msec()
@@ -703,7 +713,8 @@ class PresenceHandler(object):
else:
return updates
async def set_state(self, target_user, state, ignore_status_msg=False):
@defer.inlineCallbacks
def set_state(self, target_user, state, ignore_status_msg=False):
"""Set the presence state of the user.
"""
status_msg = state.get("status_msg", None)
@@ -719,7 +730,7 @@ class PresenceHandler(object):
user_id = target_user.to_string()
prev_state = await self.current_state_for_user(user_id)
prev_state = yield self.current_state_for_user(user_id)
new_fields = {"state": presence}
@@ -730,15 +741,16 @@ class PresenceHandler(object):
if presence == PresenceState.ONLINE:
new_fields["last_active_ts"] = self.clock.time_msec()
await self._update_states([prev_state.copy_and_replace(**new_fields)])
yield self._update_states([prev_state.copy_and_replace(**new_fields)])
async def is_visible(self, observed_user, observer_user):
@defer.inlineCallbacks
def is_visible(self, observed_user, observer_user):
"""Returns whether a user can see another user's presence.
"""
observer_room_ids = await self.store.get_rooms_for_user(
observer_room_ids = yield self.store.get_rooms_for_user(
observer_user.to_string()
)
observed_room_ids = await self.store.get_rooms_for_user(
observed_room_ids = yield self.store.get_rooms_for_user(
observed_user.to_string()
)
@@ -747,7 +759,8 @@ class PresenceHandler(object):
return False
async def get_all_presence_updates(self, last_id, current_id):
@defer.inlineCallbacks
def get_all_presence_updates(self, last_id, current_id):
"""
Gets a list of presence update rows from between the given stream ids.
Each row has:
@@ -762,7 +775,7 @@ class PresenceHandler(object):
"""
# TODO(markjh): replicate the unpersisted changes.
# This could use the in-memory stores for recent changes.
rows = await self.store.get_all_presence_updates(last_id, current_id)
rows = yield self.store.get_all_presence_updates(last_id, current_id)
return rows
def notify_new_event(self):
@@ -773,18 +786,20 @@ class PresenceHandler(object):
if self._event_processing:
return
async def _process_presence():
@defer.inlineCallbacks
def _process_presence():
assert not self._event_processing
self._event_processing = True
try:
await self._unsafe_process()
+                yield self._unsafe_process()
             finally:
                 self._event_processing = False

         run_as_background_process("presence.notify_new_event", _process_presence)

-    async def _unsafe_process(self):
+    @defer.inlineCallbacks
+    def _unsafe_process(self):
         # Loop round handling deltas until we're up to date
         while True:
             with Measure(self.clock, "presence_delta"):
@@ -797,10 +812,10 @@ class PresenceHandler(object):
                     self._event_pos,
                     room_max_stream_ordering,
                 )
-                max_pos, deltas = await self.store.get_current_state_deltas(
+                max_pos, deltas = yield self.store.get_current_state_deltas(
                     self._event_pos, room_max_stream_ordering
                 )
-                await self._handle_state_delta(deltas)
+                yield self._handle_state_delta(deltas)

                 self._event_pos = max_pos
@@ -809,7 +824,8 @@ class PresenceHandler(object):
                     max_pos
                 )

-    async def _handle_state_delta(self, deltas):
+    @defer.inlineCallbacks
+    def _handle_state_delta(self, deltas):
         """Process current state deltas to find new joins that need to be
         handled.
         """
@@ -830,13 +846,13 @@ class PresenceHandler(object):
                 # joins.
                 continue

-            event = await self.store.get_event(event_id, allow_none=True)
+            event = yield self.store.get_event(event_id, allow_none=True)
             if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue

             if prev_event_id:
-                prev_event = await self.store.get_event(prev_event_id, allow_none=True)
+                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
                 if (
                     prev_event
                     and prev_event.content.get("membership") == Membership.JOIN
@@ -844,9 +860,10 @@ class PresenceHandler(object):
                     # Ignore changes to join events.
                     continue

-            await self._on_user_joined_room(room_id, state_key)
+            yield self._on_user_joined_room(room_id, state_key)

-    async def _on_user_joined_room(self, room_id, user_id):
+    @defer.inlineCallbacks
+    def _on_user_joined_room(self, room_id, user_id):
         """Called when we detect a user joining the room via the current state
         delta stream.
@@ -865,11 +882,11 @@ class PresenceHandler(object):
             # TODO: We should be able to filter the hosts down to those that
             # haven't previously seen the user

-            state = await self.current_state_for_user(user_id)
-            hosts = await self.state.get_current_hosts_in_room(room_id)
+            state = yield self.current_state_for_user(user_id)
+            hosts = yield self.state.get_current_hosts_in_room(room_id)

             # Filter out ourselves.
-            hosts = {host for host in hosts if host != self.server_name}
+            hosts = set(host for host in hosts if host != self.server_name)

             self.federation.send_presence_to_destinations(
                 states=[state], destinations=hosts
@@ -886,10 +903,10 @@ class PresenceHandler(object):
             # TODO: Check that this is actually a new server joining the
             # room.

-            user_ids = await self.state.get_current_users_in_room(room_id)
+            user_ids = yield self.state.get_current_users_in_room(room_id)
             user_ids = list(filter(self.is_mine_id, user_ids))

-            states = await self.current_state_for_users(user_ids)
+            states = yield self.current_state_for_users(user_ids)

             # Filter out old presence, i.e. offline presence states where
             # the user hasn't been active for a week. We can change this
@@ -979,8 +996,9 @@ class PresenceEventSource(object):
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()

+    @defer.inlineCallbacks
     @log_function
-    async def get_new_events(
+    def get_new_events(
         self,
         user,
         from_key,
@@ -1027,7 +1045,7 @@ class PresenceEventSource(object):
             presence = self.get_presence_handler()
             stream_change_cache = self.store.presence_stream_cache

-            users_interested_in = await self._get_interested_in(user, explicit_room_id)
+            users_interested_in = yield self._get_interested_in(user, explicit_room_id)

             user_ids_changed = set()
             changed = None
@@ -1053,7 +1071,7 @@ class PresenceEventSource(object):
             else:
                 user_ids_changed = users_interested_in

-            updates = await presence.current_state_for_users(user_ids_changed)
+            updates = yield presence.current_state_for_users(user_ids_changed)

         if include_offline:
             return (list(updates.values()), max_token)
@@ -1066,11 +1084,11 @@ class PresenceEventSource(object):
     def get_current_key(self):
         return self.store.get_current_presence_token()

-    async def get_pagination_rows(self, user, pagination_config, key):
-        return await self.get_new_events(user, from_key=None, include_offline=False)
+    def get_pagination_rows(self, user, pagination_config, key):
+        return self.get_new_events(user, from_key=None, include_offline=False)

-    @cached(num_args=2, cache_context=True)
-    async def _get_interested_in(self, user, explicit_room_id, cache_context):
+    @cachedInlineCallbacks(num_args=2, cache_context=True)
+    def _get_interested_in(self, user, explicit_room_id, cache_context):
         """Returns the set of users that the given user should see presence
         updates for
         """
@@ -1078,13 +1096,13 @@ class PresenceEventSource(object):
         users_interested_in = set()
         users_interested_in.add(user_id)  # So that we receive our own presence

-        users_who_share_room = await self.store.get_users_who_share_room_with_user(
+        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id, on_invalidate=cache_context.invalidate
         )
         users_interested_in.update(users_who_share_room)

         if explicit_room_id:
-            user_ids = await self.store.get_users_in_room(
+            user_ids = yield self.store.get_users_in_room(
                 explicit_room_id, on_invalidate=cache_context.invalidate
             )
             users_interested_in.update(user_ids)
@@ -1259,8 +1277,8 @@ def get_interested_parties(store, states):
         2-tuple: `(room_ids_to_states, users_to_states)`,
         with each item being a dict of `entity_name` -> `[UserPresenceState]`
     """
-    room_ids_to_states = {}  # type: Dict[str, List[UserPresenceState]]
-    users_to_states = {}  # type: Dict[str, List[UserPresenceState]]
+    room_ids_to_states = {}
+    users_to_states = {}
     for state in states:
         room_ids = yield store.get_rooms_for_user(state.user_id)
         for room_id in room_ids:
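
Note on the pattern above: every hunk in this file is the same mechanical translation. The newer side of the diff uses native coroutines (async def / await), while the matrix-org-hotfixes side still drives Twisted Deferreds through @defer.inlineCallbacks generators. A minimal sketch of the two equivalent spellings (illustrative names, not Synapse code):

    from twisted.internet import defer

    # hotfixes style: a generator-based coroutine; each yield waits on a Deferred
    @defer.inlineCallbacks
    def get_score(store):
        value = yield store.get_value()
        return value + 1

    # newer style on the other side of this diff: a native coroutine;
    # Twisted Deferreds are awaitable, so await works on them directly
    async def get_score_native(store):
        value = await store.get_value()
        return value + 1

This is also why the defer.ensureDeferred(...) wrappers disappear in the room_member.py hunks further down: that wrapper is only needed to turn a native coroutine into a Deferred, and on the hotfixes side the callees are still plain inlineCallbacks functions.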

View File: synapse/handlers/profile.py

@@ -28,7 +28,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import UserID, create_requester, get_domain_from_id
+from synapse.types import UserID, get_domain_from_id

 from ._base import BaseHandler
@@ -165,12 +165,6 @@ class BaseProfileHandler(BaseHandler):
         if new_displayname == "":
             new_displayname = None

-        # If the admin changes the display name of a user, the requesting user cannot send
-        # the join event to update the displayname in the rooms.
-        # This must be done by the target user himself.
-        if by_admin:
-            requester = create_requester(target_user)
-
         yield self.store.set_profile_displayname(target_user.localpart, new_displayname)

         if self.hs.config.user_directory_search_all_users:
@@ -223,10 +217,6 @@ class BaseProfileHandler(BaseHandler):
                 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
             )

-        # Same like set_displayname
-        if by_admin:
-            requester = create_requester(target_user)
-
         yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url)

         if self.hs.config.user_directory_search_all_users:
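
The lines removed here are the newer branch's "act as the target user" logic: when an admin edits someone else's profile, the follow-up room events must be sent by the target user, so the requester is rebuilt with create_requester (hence the import change above). A rough sketch of that pattern, assuming the synapse.types helpers of this era:

    from synapse.types import UserID, create_requester

    target_user = UserID.from_string("@alice:example.com")

    # by_admin is the flag passed into set_displayname on the newer branch
    by_admin = True
    if by_admin:
        # replace the admin's requester with one acting as the target user
        requester = create_requester(target_user)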

View File: synapse/handlers/receipts.py

@@ -94,7 +94,7 @@ class ReceiptsHandler(BaseHandler):
             # no new receipts
             return False

-        affected_room_ids = list({r.room_id for r in receipts})
+        affected_room_ids = list(set([r.room_id for r in receipts]))

         self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
         # Note that the min here shouldn't be relied upon to be accurate.

View File: synapse/handlers/room.py

@@ -64,21 +64,18 @@ class RoomCreationHandler(BaseHandler):
             "history_visibility": "shared",
             "original_invitees_have_ops": False,
             "guest_can_join": True,
-            "power_level_content_override": {"invite": 0},
         },
         RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
             "join_rules": JoinRules.INVITE,
             "history_visibility": "shared",
             "original_invitees_have_ops": True,
             "guest_can_join": True,
-            "power_level_content_override": {"invite": 0},
         },
         RoomCreationPreset.PUBLIC_CHAT: {
             "join_rules": JoinRules.PUBLIC,
             "history_visibility": "shared",
             "original_invitees_have_ops": False,
             "guest_can_join": False,
-            "power_level_content_override": {},
         },
     }
@@ -149,9 +146,7 @@ class RoomCreationHandler(BaseHandler):
         return ret

     @defer.inlineCallbacks
-    def _upgrade_room(
-        self, requester: Requester, old_room_id: str, new_version: RoomVersion
-    ):
+    def _upgrade_room(self, requester, old_room_id, new_version):
         user_id = requester.user.to_string()

         # start by allocating a new room id
@@ -264,7 +259,7 @@ class RoomCreationHandler(BaseHandler):
         for v in ("invite", "events_default"):
             current = int(pl_content.get(v, 0))
             if current < restricted_level:
-                logger.debug(
+                logger.info(
                     "Setting level for %s in %s to %i (was %i)",
                     v,
                     old_room_id,
@@ -274,7 +269,7 @@ class RoomCreationHandler(BaseHandler):
                 pl_content[v] = restricted_level
                 updated = True
             else:
-                logger.debug("Not setting level for %s (already %i)", v, current)
+                logger.info("Not setting level for %s (already %i)", v, current)

         if updated:
             try:
@@ -292,6 +287,16 @@ class RoomCreationHandler(BaseHandler):
             except AuthError as e:
                 logger.warning("Unable to update PLs in old room: %s", e)

+        new_pl_content = copy_power_levels_contents(old_room_pl_state.content)
+
+        # pre-msc2260 rooms may not have the right setting for aliases. If no other
+        # value is set, set it now.
+        events_default = new_pl_content.get("events_default", 0)
+        new_pl_content.setdefault("events", {}).setdefault(
+            EventTypes.Aliases, events_default
+        )
+
+        logger.info("Setting correct PLs in new room to %s", new_pl_content)
         yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -299,7 +304,7 @@ class RoomCreationHandler(BaseHandler):
                 "state_key": "",
                 "room_id": new_room_id,
                 "sender": requester.user.to_string(),
-                "content": old_room_pl_state.content,
+                "content": new_pl_content,
             },
             ratelimit=False,
         )
@@ -345,7 +350,7 @@ class RoomCreationHandler(BaseHandler):
         # If so, mark the new room as non-federatable as well
         creation_content["m.federate"] = False

-        initial_state = {}
+        initial_state = dict()

         # Replicate relevant room events
         types_to_copy = (
@@ -440,21 +445,19 @@ class RoomCreationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _move_aliases_to_new_room(
-        self,
-        requester: Requester,
-        old_room_id: str,
-        new_room_id: str,
-        old_room_state: StateMap[str],
+        self, requester, old_room_id, new_room_id, old_room_state
     ):
         directory_handler = self.hs.get_handlers().directory_handler

         aliases = yield self.store.get_aliases_for_room(old_room_id)

         # check to see if we have a canonical alias.
-        canonical_alias_event = None
+        canonical_alias = None
         canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
         if canonical_alias_event_id:
             canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")

         # first we try to remove the aliases from the old room (we suppress sending
         # the room_aliases event until the end).
@@ -472,7 +475,9 @@ class RoomCreationHandler(BaseHandler):
         for alias_str in aliases:
             alias = RoomAlias.from_string(alias_str)
             try:
-                yield directory_handler.delete_association(requester, alias)
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False
+                )
                 removed_aliases.append(alias_str)
             except SynapseError as e:
                 logger.warning("Unable to remove alias %s from old room: %s", alias, e)
@@ -482,6 +487,19 @@ class RoomCreationHandler(BaseHandler):
         if not removed_aliases:
             return

+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(requester, old_room_id)
+        except AuthError as e:
+            logger.warning("Failed to send updated alias event on old room: %s", e)
+
         # we can now add any aliases we successfully removed to the new room.
         for alias in removed_aliases:
             try:
@@ -490,6 +508,7 @@ class RoomCreationHandler(BaseHandler):
                     RoomAlias.from_string(alias),
                     new_room_id,
                     servers=(self.hs.hostname,),
+                    send_event=False,
                     check_membership=False,
                 )
                 logger.info("Moved alias %s to new room", alias)
@@ -498,10 +517,8 @@ class RoomCreationHandler(BaseHandler):
             # checking module decides it shouldn't, or similar.
             logger.error("Error adding alias %s to new room: %s", alias, e)

-        # If a canonical alias event existed for the old room, fire a canonical
-        # alias event for the new room with a copy of the information.
         try:
-            if canonical_alias_event:
+            if canonical_alias and (canonical_alias in removed_aliases):
                 yield self.event_creation_handler.create_and_send_nonmember_event(
                     requester,
                     {
@@ -509,10 +526,12 @@ class RoomCreationHandler(BaseHandler):
                         "state_key": "",
                         "room_id": new_room_id,
                         "sender": requester.user.to_string(),
-                        "content": canonical_alias_event.content,
+                        "content": {"alias": canonical_alias},
                     },
                     ratelimit=False,
                 )
+
+            yield directory_handler.send_room_alias_update_event(requester, new_room_id)
         except SynapseError as e:
             # again I'm not really expecting this to fail, but if it does, I'd rather
             # we returned the new room to the client at this point.
@@ -560,13 +579,9 @@ class RoomCreationHandler(BaseHandler):
         # Check whether the third party rules allows/changes the room create
         # request.
-        event_allowed = yield self.third_party_event_rules.on_create_room(
+        yield self.third_party_event_rules.on_create_room(
             requester, config, is_requester_admin=is_requester_admin
         )
-        if not event_allowed:
-            raise SynapseError(
-                403, "You are not permitted to create rooms", Codes.FORBIDDEN
-            )

         if not is_requester_admin and not self.spam_checker.user_may_create_room(
             user_id
@@ -642,6 +657,7 @@ class RoomCreationHandler(BaseHandler):
                 room_id=room_id,
                 room_alias=room_alias,
                 servers=[self.hs.hostname],
+                send_event=False,
                 check_membership=False,
             )
@@ -738,6 +754,7 @@ class RoomCreationHandler(BaseHandler):
         if room_alias:
             result["room_alias"] = room_alias.to_string()
+            yield directory_handler.send_room_alias_update_event(requester, room_id)

         return result
@@ -765,7 +782,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
-            logger.debug("Sending %s in new room", etype)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator, event, ratelimit=False
             )
@@ -779,7 +796,7 @@ class RoomCreationHandler(BaseHandler):
         creation_content.update({"creator": creator_id})
         yield send(etype=EventTypes.Create, content=creation_content)

-        logger.debug("Sending %s in new room", EventTypes.Member)
+        logger.info("Sending %s in new room", EventTypes.Member)
         yield self.room_member_handler.update_membership(
             creator,
             creator.user,
@@ -804,24 +821,23 @@ class RoomCreationHandler(BaseHandler):
                     EventTypes.RoomHistoryVisibility: 100,
                     EventTypes.CanonicalAlias: 50,
                     EventTypes.RoomAvatar: 50,
                     EventTypes.Tombstone: 100,
                     EventTypes.ServerACL: 100,
+
+                    # MSC2260: Allow everybody to send alias events by default
+                    # This will be redundant on pre-MSC2260 rooms, since the
+                    # aliases event is special-cased.
+                    EventTypes.Aliases: 0,
                 },
                 "events_default": 0,
                 "state_default": 50,
                 "ban": 50,
                 "kick": 50,
                 "redact": 50,
-                "invite": 50,
+                "invite": 0,
             }

             if config["original_invitees_have_ops"]:
                 for invitee in invite_list:
                     power_level_content["users"][invitee] = 100

-            # Power levels overrides are defined per chat preset
-            power_level_content.update(config["power_level_content_override"])
-
             if power_level_content_override:
                 power_level_content.update(power_level_content_override)
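
Taken together, the power-level hunks define a layering order on the hotfixes side: start from the hard-coded defaults (now with "invite": 0 and an explicit aliases entry per the MSC2260 hack), give ops to the original invitees if the preset asks for it, then apply any caller-supplied power_level_content_override last, so it always wins. A simplified illustration with plain dicts (most keys omitted):

    power_level_content = {
        "users": {"@creator:example.com": 100},
        "events": {"m.room.aliases": 0},  # MSC2260: anyone may send alias events
        "invite": 0,                      # hotfixes default; was 50
    }

    # preset: original invitees get ops
    for invitee in ["@friend:example.com"]:
        power_level_content["users"][invitee] = 100

    # an explicit override from the caller is applied last, so it wins
    power_level_content_override = {"invite": 50}
    if power_level_content_override:
        power_level_content.update(power_level_content_override)

    assert power_level_content["invite"] == 50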

View File: synapse/handlers/room_list.py

@@ -43,7 +43,8 @@ class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
         self.enable_room_list_search = hs.config.enable_room_list_search
-        self.response_cache = ResponseCache(hs, "room_list")
+        self.response_cache = ResponseCache(hs, "room_list", timeout_ms=10 * 60 * 1000)
         self.remote_response_cache = ResponseCache(
             hs, "remote_room_list", timeout_ms=30 * 1000
         )
@@ -216,6 +217,15 @@ class RoomListHandler(BaseHandler):
                 direction_is_forward=False,
             ).to_token()

+        for room in results:
+            # populate search result entries with additional fields, namely
+            # 'aliases'
+            room_id = room["room_id"]
+
+            aliases = yield self.store.get_aliases_for_room(room_id)
+            if aliases:
+                room["aliases"] = aliases
+
         response["chunk"] = results
         response["total_room_count_estimate"] = yield self.store.count_public_rooms(

View File: synapse/handlers/room_member.py

@@ -62,6 +62,7 @@ class RoomMemberHandler(object):
         self.event_creation_handler = hs.get_event_creation_handler()

         self.member_linearizer = Linearizer(name="member")
+        self.member_limiter = Linearizer(max_count=10, name="member_as_limiter")

         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
@@ -269,19 +270,38 @@ class RoomMemberHandler(object):
     ):
         key = (room_id,)

-        with (yield self.member_linearizer.queue(key)):
-            result = yield self._update_membership(
-                requester,
-                target,
-                room_id,
-                action,
-                txn_id=txn_id,
-                remote_room_hosts=remote_room_hosts,
-                third_party_signed=third_party_signed,
-                ratelimit=ratelimit,
-                content=content,
-                require_consent=require_consent,
-            )
+        as_id = object()
+        if requester.app_service:
+            as_id = requester.app_service.id
+
+        then = self.clock.time_msec()
+
+        with (yield self.member_limiter.queue(as_id)):
+            diff = self.clock.time_msec() - then
+            if diff > 80 * 1000:
+                # haproxy would have timed the request out anyway...
+                raise SynapseError(504, "took too long to process")
+
+            with (yield self.member_linearizer.queue(key)):
+                diff = self.clock.time_msec() - then
+                if diff > 80 * 1000:
+                    # haproxy would have timed the request out anyway...
+                    raise SynapseError(504, "took too long to process")
+
+                result = yield self._update_membership(
+                    requester,
+                    target,
+                    room_id,
+                    action,
+                    txn_id=txn_id,
+                    remote_room_hosts=remote_room_hosts,
+                    third_party_signed=third_party_signed,
+                    ratelimit=ratelimit,
+                    content=content,
+                    require_consent=require_consent,
+                )

         return result
@@ -944,10 +964,8 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             # join dance for now, since we're kinda implicitly checking
             # that we are allowed to join when we decide whether or not we
             # need to do the invite/join dance.
-            yield defer.ensureDeferred(
-                self.federation_handler.do_invite_join(
-                    remote_room_hosts, room_id, user.to_string(), content
-                )
-            )
+            yield self.federation_handler.do_invite_join(
+                remote_room_hosts, room_id, user.to_string(), content
+            )

             yield self._user_joined_room(user, room_id)
@@ -984,10 +1002,8 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         """
         fed_handler = self.federation_handler
         try:
-            ret = yield defer.ensureDeferred(
-                fed_handler.do_remotely_reject_invite(
-                    remote_room_hosts, room_id, target.to_string(), content=content,
-                )
-            )
+            ret = yield fed_handler.do_remotely_reject_invite(
+                remote_room_hosts, room_id, target.to_string(), content=content,
+            )
             return ret
         except Exception as e:
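
The member_limiter introduced above is a Linearizer with max_count=10, keyed on the app-service ID; for ordinary users as_id is a fresh object(), so every such request gets a unique key and is never queued behind another. The elapsed-time checks turn requests that already sat in the queue past haproxy's roughly 80-second timeout into fast 504s instead of doing work nobody will see. A condensed sketch of the Linearizer pattern (assuming the synapse.util.async_helpers API of this era):

    from twisted.internet import defer

    from synapse.util.async_helpers import Linearizer

    limiter = Linearizer(max_count=10, name="member_as_limiter")

    @defer.inlineCallbacks
    def handle(as_id, clock, work):
        then = clock.time_msec()
        # queue() resolves once fewer than max_count holders exist for this key
        with (yield limiter.queue(as_id)):
            if clock.time_msec() - then > 80 * 1000:
                # the real code raises SynapseError(504, ...) here
                raise RuntimeError("took too long to process")
            yield work()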

View File: synapse/handlers/saml_handler.py

@@ -23,9 +23,9 @@ from saml2.client import Saml2Client

 from synapse.api.errors import SynapseError
 from synapse.config import ConfigError
-from synapse.http.server import finish_request
 from synapse.http.servlet import parse_string
 from synapse.module_api import ModuleApi
+from synapse.rest.client.v1.login import SSOAuthHandler
 from synapse.types import (
     UserID,
     map_username_to_mxid_localpart,
@@ -48,7 +48,7 @@ class Saml2SessionData:
 class SamlHandler:
     def __init__(self, hs):
         self._saml_client = Saml2Client(hs.config.saml2_sp_config)
-        self._auth_handler = hs.get_auth_handler()
+        self._sso_auth_handler = SSOAuthHandler(hs)
         self._registration_handler = hs.get_registration_handler()

         self._clock = hs.get_clock()
@@ -74,8 +74,6 @@ class SamlHandler:
         # a lock on the mappings
         self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock)

-        self._error_html_content = hs.config.saml2_error_html_content
-
     def handle_redirect_request(self, client_redirect_url):
         """Handle an incoming request to /login/sso/redirect
@@ -117,23 +115,8 @@ class SamlHandler:
         # the dict.
         self.expire_sessions()

-        try:
-            user_id = await self._map_saml_response_to_user(resp_bytes, relay_state)
-        except Exception as e:
-            # If decoding the response or mapping it to a user failed, then log the
-            # error and tell the user that something went wrong.
-            logger.error(e)
-
-            request.setResponseCode(400)
-            request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-            request.setHeader(
-                b"Content-Length", b"%d" % (len(self._error_html_content),)
-            )
-            request.write(self._error_html_content.encode("utf8"))
-            finish_request(request)
-            return
-
-        self._auth_handler.complete_sso_login(user_id, request, relay_state)
+        user_id = await self._map_saml_response_to_user(resp_bytes, relay_state)
+        self._sso_auth_handler.complete_sso_login(user_id, request, relay_state)

     async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url):
         try:

View File: synapse/handlers/search.py

@@ -184,7 +184,7 @@ class SearchHandler(BaseHandler):
             membership_list=[Membership.JOIN],
             # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
         )
-        room_ids = {r.room_id for r in rooms}
+        room_ids = set(r.room_id for r in rooms)

         # If doing a subset of all rooms search, check if any of the rooms
         # are from an upgraded room, and search their contents as well
@@ -374,12 +374,12 @@ class SearchHandler(BaseHandler):
             ).to_string()

             if include_profile:
-                senders = {
+                senders = set(
                     ev.sender
                     for ev in itertools.chain(
                         res["events_before"], [event], res["events_after"]
                     )
-                }
+                )

             if res["events_after"]:
                 last_event_id = res["events_after"][-1].event_id
@@ -421,7 +421,7 @@ class SearchHandler(BaseHandler):
         state_results = {}
         if include_state:
-            rooms = {e.room_id for e in allowed_events}
+            rooms = set(e.room_id for e in allowed_events)
             for room_id in rooms:
                 state = yield self.state_handler.get_current_state(room_id)
                 state_results[room_id] = list(state.values())
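
Several hunks in this file, and in typing.py at the end of this diff, are the same cosmetic revert: the newer branch's set-comprehension literals become explicit set(...) calls over a generator on the hotfixes side. The two spellings are equivalent; a quick check (this get_domain_from_id stands in for the real helper from synapse.types):

    def get_domain_from_id(user_id):
        return user_id.split(":", 1)[1]

    users = ["@a:one.example", "@b:one.example", "@c:two.example"]

    assert {get_domain_from_id(u) for u in users} == set(
        get_domain_from_id(u) for u in users
    ) == {"one.example", "two.example"}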

View File: synapse/handlers/set_password.py

@@ -13,12 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import Optional

 from twisted.internet import defer

 from synapse.api.errors import Codes, StoreError, SynapseError
-from synapse.types import Requester

 from ._base import BaseHandler
@@ -34,17 +32,14 @@ class SetPasswordHandler(BaseHandler):
         self._device_handler = hs.get_device_handler()

     @defer.inlineCallbacks
-    def set_password(
-        self,
-        user_id: str,
-        new_password: str,
-        logout_devices: bool,
-        requester: Optional[Requester] = None,
-    ):
+    def set_password(self, user_id, newpassword, requester=None):
         if not self.hs.config.password_localdb_enabled:
             raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)

-        password_hash = yield self._auth_handler.hash(new_password)
+        password_hash = yield self._auth_handler.hash(newpassword)
+
+        except_device_id = requester.device_id if requester else None
+        except_access_token_id = requester.access_token_id if requester else None

         try:
             yield self.store.user_set_password_hash(user_id, password_hash)
@@ -53,18 +48,14 @@ class SetPasswordHandler(BaseHandler):
                 raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
             raise e

-        # Optionally, log out all of the user's other sessions.
-        if logout_devices:
-            except_device_id = requester.device_id if requester else None
-            except_access_token_id = requester.access_token_id if requester else None
+        # we want to log out all of the user's other sessions. First delete
+        # all his other devices.
+        yield self._device_handler.delete_all_devices_for_user(
+            user_id, except_device_id=except_device_id
+        )

-            # First delete all of their other devices.
-            yield self._device_handler.delete_all_devices_for_user(
-                user_id, except_device_id=except_device_id
-            )
+        # and now delete any access tokens which weren't associated with
+        # devices (or were associated with this device).
+        yield self._auth_handler.delete_access_tokens_for_user(
+            user_id, except_token_id=except_access_token_id
+        )
-
-            # and now delete any access tokens which weren't associated with
-            # devices (or were associated with this device).
-            yield self._auth_handler.delete_access_tokens_for_user(
-                user_id, except_token_id=except_access_token_id
-            )
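
The signature change above is the crux of this file's diff: the newer branch added an explicit logout_devices flag (plus type hints), while the hotfixes side keeps the older two-argument form that always logs the user's other sessions out. Under the newer signature, a password change that deliberately keeps other devices logged in would look roughly like this (an illustrative call, not taken from this diff):

    yield self.set_password_handler.set_password(
        user_id="@alice:example.com",
        new_password="correct horse battery staple",
        logout_devices=False,  # keep other devices and access tokens valid
        requester=requester,
    )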

View File: synapse/handlers/stats.py

@@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
                     room_state["guest_access"] = event_content.get("guest_access")

         for room_id, state in room_to_state_updates.items():
-            logger.debug("Updating room_stats_state for %s: %s", room_id, state)
+            logger.info("Updating room_stats_state for %s: %s", room_id, state)
             yield self.store.update_room_state(room_id, state)

         return room_to_stats_deltas, user_to_stats_deltas

File diff suppressed because it is too large.

View File: synapse/handlers/typing.py

@@ -125,7 +125,7 @@ class TypingHandler(object):
         if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")

-        yield self.auth.check_user_in_room(room_id, target_user_id)
+        yield self.auth.check_joined_room(room_id, target_user_id)

         logger.debug("%s has started typing in %s", target_user_id, room_id)
@@ -155,7 +155,7 @@ class TypingHandler(object):
         if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")

-        yield self.auth.check_user_in_room(room_id, target_user_id)
+        yield self.auth.check_joined_room(room_id, target_user_id)

         logger.debug("%s has stopped typing in %s", target_user_id, room_id)
@@ -198,7 +198,7 @@ class TypingHandler(object):
                 now=now, obj=member, then=now + FEDERATION_PING_INTERVAL
             )

-            for domain in {get_domain_from_id(u) for u in users}:
+            for domain in set(get_domain_from_id(u) for u in users):
                 if domain != self.server_name:
                     logger.debug("sending typing update to %s", domain)
                     self.federation.build_and_send_edu(
@@ -231,7 +231,7 @@ class TypingHandler(object):
                 return

         users = yield self.state.get_current_users_in_room(room_id)
-        domains = {get_domain_from_id(u) for u in users}
+        domains = set(get_domain_from_id(u) for u in users)

         if self.server_name in domains:
             logger.info("Got typing update from %s: %r", user_id, content)

Some files were not shown because too many files have changed in this diff.