Compare commits

v0.21.0...matthew/fi — 383 commits

Commit SHA1s:

cd8494a9a1 90d70af269 b23cb8fba8 e4a709eda3 d5325d7ef1 d5694ac5fa e43de3ae4b 75e67b9ee4
768f00dedb 4dc07e93a8 7cc483aa0e e1e7d76cf1 93247a424a 5f501ec7e2 761d255fdf ace8079086
7a44c01d89 c9bc4b7031 ae79764fe5 77f1d24de3 9ccb4226ba bf86a41ef1 8090fd4664 3a743f649c
adec03395d 74e494b010 ef3a5ae787 8c06dd6071 60c78666ab 1786b0e768 8ad5f34908 6cd5fcd536
ccc67d445b 9fd086e506 0b03a97708 4824a33c31 1a398b19fd f4c8cd5e85 b8d832a08c e3edca3b5d
cacfa04cb6 e591f7b3f0 7141f1a5cc 44edac0497 29e1c717c3 94133d7ce8 b15c2b7971 ba8fdc925c
79b3cf3e02 b4fd710e1a b68b0ede7a 68f737702b f65e31d22f f496399ac4 3166ed55b2 c94ab5976a
6de74ea6d7 72472456d8 c5c24c239b c5b0e9f485 abdefb8a01 afbd773dc6 2a4b9ea233 3b98439eca
fde63b880d 2d511defd9 dd1ea9763a e76d1135dd fcf2c0fd1a 9864efa532 aa620d09a0 2eabdf3f98
5ed109d59f 3f405b34e9 290777b3d9 77c81ca6ea 2d1b7955ae 862c8da560 2d9f341c3e 436ee0a2ea
b393f5db51 a2562f9d74 d6dadd95ac 993d3f710b 4a94eb3ea4 3a0cee28d6 4f845a0713 473700f016
9ce866ed4f 69ef4987a6 53cc8ad35a e2fcba038c 5f59f20636 59de2c7afa 4b616c8cf2 4dd61df6f8
c0c31656ff 8b16b43b7f dff396de0f f06ffdb6fa 6e67aaa7f2 934ab76835 fc9878f6a4 a4d3bfe3d6
a7effa8400 a04c6bbf8f 77ea8cbdd7 20b3660495 046b659ce2 413c270723 ec3a2dc773 012875258c
692250c6be d2352347cf 92168cbbc5 963015005e 10d8b701a1 543c794a76 57cd0c3dea b524dd4c35
09703609fc eae04f1952 5699b05072 09552f9d9c f18373dc5d b27429729d 60a9a49f83 d7d24750be
514c2d3c4d bfde076022 d3862812ff 8d26385d76 67b7b904ba f60218ec41 91818723a1 e9aec001f4
0184a97dbd 85b9f76f1d e2cb760dcc 925b3638ff 9a6fd3ef29 2f82de18ee 6e16aca8b0 d4d12daed9
f467a8f66d c9184ed87e 1fc4a962e4 08284c86ed f502b0dea1 1200f28d66 76ed3476d3 58dc1f2c78
5a7f561a9b ed9a7f5436 1f64207f26 42b50483be 6264cf9666 f386632800 5e49a57ecc 3d31b39297
73cfe48031 05538587ef f92d7416d7 1f12d808e7 29a4066a4d 7afb4e3f54 495f075b41 b5e8d529e6
3e279411fe 47574c9cba 6ff14ddd2e 5946aa0877 d800ab2847 2c365f4723 a1a253ea50 c72058bcc6
27f26e48b7 8c23221666 731f3c37a0 4b444723f0 816605a137 78cefd78d6 a0a561ae85 ed3d0170d9
976128f368 d04d672a80 036f439f53 1bce3e6b35 e3cbec10c1 8abdd7b553 ff13c5e7af 27bd0b9a91
bce144595c 75eba3b07d 1591eddaea 4fec80ba6f 7fe8ed1787 e204062310 44c722931b 2d520a9826
24d894e2e2 ccfcef6b59 e0004aa28a b668112320 dae9a00a28 71995e1397 8177563ebe 4202fba82a
812c030e87 1217c7da91 7d69f2d956 385dcb7c60 b8b936a6ea b5f665de32 e5ae386ea4 36e51aad3c
b490299a3b 5db7070dd1 d7fe6b356c fcf01dd88e 4f66312df8 3fafb7b189 776a070421 dfeca6cf40
6aa5bc8635 d8f47d2efa 0a9315bbc7 1ff419d343 24df576795 fdf1ca30f0 052c5d19d5 5ddd199870
a9d6fa8b2b 4564b05483 72613bc379 ebcd55d641 4b461a6931 93e7a38370 617304b2cf ba502fb89a
6c6b9689bb d9fd937e39 fe9dc522d4 505e7e8b9d 6fd7e6db3d fdca6e36ee 90ae0cffec de4cb50ca6
a09e09ce76 48d2949416 6ae8373d40 b58e24cc3c d53fe399eb a837765e8c f540b494a4 8060974344
b0d975e216 e54d7d536e 1e9b4d5a95 efc2b7db95 bfd68019c2 1946867bc2 1664948e41 935e588799
eed59dcc1e 2cac7623a5 298d83b340 0185b75381 7132e5cdff 98bdb4468b ea11ee09f3 c62c480dc6
197bd126f0 f45f07ab86 a053ff3979 ecdd2a3658 2f34ad31ac 671f0afa1d 64ed74c01e 1a81a1898e
6ba21bf2b8 09e4bc0501 6e2a7ee1bc 65f0513a33 6f83c4537c cca94272fa 66b121b2fc 8d34120a53
1a01af079e 87e5e05aea 4d039aa2ca 21e255a8f1 d5477c7afd 02a6108235 7233341eac 8be6fd95a3
59dbb47065 9c7db2491b 0fe6f3c521 036362ede6 a757dd4863 f5cc22bdc6 5dd1b2c525 cc7609aa9f
f1378aef91 b2d8d07109 f9791498ae f091061711 4abcff0177 63c58c2a3f 304880d185 5d79d728f5
dc51af3d03 350622a107 63fda37e20 293ef29655 535c99f157 45a5df5914 3b5f22ca40 b5db4ed5f6
168524543f 3e123b8497 42137efde7 eeb2f9e546 5dbaa520a5 dd48f7204c 04095f7581 a584a81b3e
619e8ecd0c 23da638360 dfbda5e025 2b03751c3c dbc0dfd2d5 11f139a647 6e614e9e10 c049472b8a
9a804b2812 fbbc40f385 8cf9f0a3e7 e6618ece2d 58c4720293 836d5c44b6 11c2a3655f 539aa4d333
f85a415279 6489455bed d668caa79c 74bf4ee7bf 33ba90c6e9 ccd62415ac bd7bb5df71 e3417a06e2
7fb80b5eae 2d17b09a6d 24c8f38784 25f03cf8e9 270e1c904a b4f59c7e27 ab4ee2e524 58ebb96cce
99713dc7d3 1c1c0257f4 cafe659f72 72ed8196b3 107ac7ac96 234772db6d 760625acba c57789d138
f33df30732 6e381180ae 056ba9b795 88664afe14 f98efea9b1 d9e3a4b5db 66d8ffabbd ace23463c5
bbfe4e996c 9f430fa07f 55af207321 64953c8ed2 eb7cbf27bc 6b95e35e96 328378f9cb
.github/ISSUE_TEMPLATE.md — 47 lines (new file, vendored)
@@ -0,0 +1,47 @@
<!--

**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)

This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.

You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.

Text between <!-- and --> marks will be invisible in the report.

-->

### Description

Describe here the problem that you are experiencing, or the feature you are requesting.

### Steps to reproduce

- For bugs, list the steps
- that reproduce the bug
- using hyphens as bullet points

Describe how what happens differs from what you expected.

If you can identify any relevant log snippets from _homeserver.log_, please include
those here (please be careful to remove any personal or private data):

### Version information

<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->

- **Homeserver**: Was this issue identified on matrix.org or another homeserver?

If not matrix.org:
- **Version**: What version of Synapse is running? <!--
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-->
- **Install method**: package manager/git clone/pip
- **Platform**: Tell us about the environment in which your homeserver is operating
  - distro, hardware, if it's running in a vm/container, etc.
CHANGES.rst — 119 lines changed
@@ -1,3 +1,122 @@
Changes in synapse v0.23.0 (2017-10-02)
=======================================

No changes since v0.23.0-rc2


Changes in synapse v0.23.0-rc2 (2017-09-26)
===========================================

Bug fixes:

* Fix regression in performance of syncs (PR #2470)


Changes in synapse v0.23.0-rc1 (2017-09-25)
===========================================

Features:

* Add a frontend proxy worker (PR #2344)
* Add support for event_id_only push format (PR #2450)
* Add a PoC for filtering spammy events (PR #2456)
* Add a config option to block all room invites (PR #2457)


Changes:

* Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
* Improve performance of generating push notifications (PR #2343, #2357, #2365,
  #2366, #2371)
* Improve DB performance for device list handling in sync (PR #2362)
* Include a sample prometheus config (PR #2416)
* Document known to work postgres version (PR #2433) Thanks to @ptman!


Bug fixes:

* Fix caching error in the push evaluator (PR #2332)
* Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
* Fix port script for user directory tables (PR #2375)
* Fix device lists notifications when user rejoins a room (PR #2443, #2449)
* Fix sync to always send down current state events in timeline (PR #2451)
* Fix bug where guest users were incorrectly kicked (PR #2453)
* Fix bug talking to IPv6 only servers using SRV records (PR #2462)


Changes in synapse v0.22.1 (2017-07-06)
=======================================

Bug fixes:

* Fix bug where pusher pool didn't start and caused issues when
  interacting with some rooms (PR #2342)


Changes in synapse v0.22.0 (2017-07-06)
=======================================

No changes since v0.22.0-rc2


Changes in synapse v0.22.0-rc2 (2017-07-04)
===========================================

Changes:

* Improve performance of storing user IPs (PR #2307, #2308)
* Slightly improve performance of verifying access tokens (PR #2320)
* Slightly improve performance of event persistence (PR #2321)
* Increase default cache factor size from 0.1 to 0.5 (PR #2330)

Bug fixes:

* Fix bug with storing registration sessions that caused frequent CPU churn
  (PR #2319)


Changes in synapse v0.22.0-rc1 (2017-06-26)
===========================================

Features:

* Add a user directory API (PR #2252, and many more)
* Add shutdown room API to remove room from local server (PR #2291)
* Add API to quarantine media (PR #2292)
* Add new config option to not send event contents to push servers (PR #2301)
  Thanks to @cjdelisle!

Changes:

* Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256,
  #2274)
* Deduplicate sync filters (PR #2219) Thanks to @krombel!
* Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
* Add count of one time keys to sync stream (PR #2237)
* Only store event_auth for state events (PR #2247)
* Store URL cache preview downloads separately (PR #2299)

Bug fixes:

* Fix users not getting notifications when AS listened to that user_id (PR
  #2216) Thanks to @slipeer!
* Fix users without push set up not getting notifications after joining rooms
  (PR #2236)
* Fix preview url API to trim long descriptions (PR #2243)
* Fix bug where we used cached but unpersisted state group as prev group,
  resulting in broken state of restart (PR #2263)
* Fix removing of pushers when using workers (PR #2267)
* Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!


Changes in synapse v0.21.1 (2017-06-15)
=======================================

Bug fixes:

* Fix bug in anonymous usage statistic reporting (PR #2281)


Changes in synapse v0.21.0 (2017-05-18)
=======================================
@@ -27,4 +27,5 @@ exclude jenkins*.sh
exclude jenkins*
recursive-exclude jenkins *.sh

+prune .github
prune demo/etc
README.rst — 68 lines changed
@@ -200,11 +200,11 @@ different. See `the spec`__ for more information on key management.)
.. __: `key_management`_

The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
-configured without TLS; it is not recommended this be exposed outside your
-local network. Port 8448 is configured to use TLS with a self-signed
-certificate. This is fine for testing with but, to avoid your clients
-complaining about the certificate, you will almost certainly want to use
-another certificate for production purposes. (Note that a self-signed
+configured without TLS; it should be behind a reverse proxy for TLS/SSL
+termination on port 443 which in turn should be used for clients. Port 8448
+is configured to use TLS with a self-signed certificate. If you would like
+to do an initial test with a client without having to set up a reverse proxy,
+you can temporarily use another certificate. (Note that a self-signed
certificate is fine for `Federation`_). You can do so by changing
``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
@@ -283,10 +283,16 @@ Connecting to Synapse from a client
The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
http://riot.im/app. You will need to specify a "Custom server" when you log on
-or register: set this to ``https://localhost:8448`` - remember to specify the
-port (``:8448``) unless you changed the configuration. (Leave the identity
+or register: set this to ``https://domain.tld`` if you set up a reverse proxy
+following the recommended setup, or ``https://localhost:8448`` - remember to specify the
+port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
server as the default - see `Identity servers`_.)

+If using port 8448 you will run into errors until you accept the self-signed
+certificate. You can easily do this by going to ``https://localhost:8448``
+directly with your browser and accepting the presented certificate. You can then
+go back in your web client and proceed further.
+
If all goes well you should at least be able to log in, create a room, and
start sending messages.
@@ -359,7 +365,7 @@ https://www.archlinux.org/packages/community/any/matrix-synapse/, which should p
the necessary dependencies. If the default web client is to be served (enabled by default in
the generated config),
https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
be installed.

Alternatively, to install using pip a few changes may be needed as ArchLinux
defaults to python 3, but synapse currently assumes python 2.7 by default:
@@ -528,6 +534,30 @@ fix try re-installing from PyPI or directly from
    # Install from github
    pip install --user https://github.com/pyca/pynacl/tarball/master

Running out of File Handles
~~~~~~~~~~~~~~~~~~~~~~~~~~~

If synapse runs out of filehandles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of June 2017 this hasn't happened yet).

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``.
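For instance, raising the limit for a dedicated ``synapse`` account might look
like this in ``/etc/security/limits.conf`` (the account name is an assumption
about your setup, not part of the diff)::

    synapse soft nofile 4096
    synapse hard nofile 4096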
Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
you see this failure mode so we can help debug it, however.

ArchLinux
~~~~~~~~~
@@ -569,8 +599,9 @@ you to run your server on a machine that might not have the same name as your
domain name. For example, you might want to run your server at
``synapse.example.com``, but have your Matrix user-ids look like
``@user:example.com``. (A SRV record also allows you to change the port from
-the default 8448. However, if you are thinking of using a reverse-proxy, be
-sure to read `Reverse-proxying the federation port`_ first.)
+the default 8448. However, if you are thinking of using a reverse-proxy on the
+federation port, which is not recommended, be sure to read
+`Reverse-proxying the federation port`_ first.)

To use a SRV record, first create your SRV record and publish it in DNS. This
should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
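For illustration, a record delegating ``example.com`` to a homeserver at
``synapse.example.com`` on the default port might look like this (all names and
the TTL are placeholders)::

    _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.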
@@ -650,7 +681,7 @@ For information on how to install and use PostgreSQL, please see
Using a reverse proxy with Synapse
==================================

-It is possible to put a reverse proxy such as
+It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
@@ -668,9 +699,9 @@ federation port has a number of pitfalls. It is possible, but be sure to read
`Reverse-proxying the federation port`_.

The recommended setup is therefore to configure your reverse-proxy on port 443
-for client connections, but to also expose port 8448 for server-server
-connections. All the Matrix endpoints begin ``/_matrix``, so an example nginx
-configuration might look like::
+to port 8008 of synapse for client connections, but to also directly expose port
+8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
+so an example nginx configuration might look like::

    server {
        listen 443 ssl;
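The hunk ends mid-block; a minimal sketch of how such a ``server`` block might
continue, assuming synapse listens on localhost:8008 and TLS certificates are
already configured (the host name is a placeholder, not part of the diff)::

    server {
        listen 443 ssl;
        server_name matrix.example.com;

        location /_matrix {
            proxy_pass http://localhost:8008;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }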
@@ -875,12 +906,9 @@ cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
-at around 3-4GB of resident memory - this is what we currently run the
-matrix.org on. The default setting is currently 0.1, which is probably
-around a ~700MB footprint. You can dial it down further to 0.02 if
-desired, which targets roughly ~512MB. Conversely you can dial it up if
-you need performance for lots of users and have a box with a lot of RAM.
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory constrained environments, or increased if performance starts to
+degrade.
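Setting the variable is a matter of exporting it in the environment that starts
synapse; for instance (the value here is only illustrative)::

    export SYNAPSE_CACHE_FACTOR=0.5
    synctl restart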

.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
UPGRADE.rst — 93 lines changed
@@ -5,39 +5,48 @@ Before upgrading check if any special steps are required to upgrade from the
what you currently have installed to current version of synapse. The extra
instructions that may be required are listed later in this document.

-If synapse was installed in a virtualenv then activate that virtualenv before
-upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
+1. If synapse was installed in a virtualenv then activate that virtualenv before
+   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
+   run:

    .. code:: bash

        source ~/.synapse/bin/activate

-If synapse was installed using pip then upgrade to the latest version by
-running:
+2. If synapse was installed using pip then upgrade to the latest version by
+   running:

    .. code:: bash

        pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

+       # restart synapse
+       synctl restart

-If synapse was installed using git then upgrade to the latest version by
-running:
+   If synapse was installed using git then upgrade to the latest version by
+   running:

    .. code:: bash

        # Pull the latest version of the master branch.
        git pull
        # Update the versions of synapse's python dependencies.
-       python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
+       python synapse/python_dependencies.py | xargs pip install --upgrade

+       # restart synapse
+       ./synctl restart

-To check whether your update was successful, run:
-
-.. code:: bash
-
-    # replace your.server.domain with the domain of your synapse homeserver
-    curl https://<your.server.domain>/_matrix/federation/v1/version
-
-So for the Matrix.org HS server the URL would be: https://matrix.org/_matrix/federation/v1/version.
+To check whether your update was successful, you can check the Server header
+returned by the Client-Server API:
+
+.. code:: bash
+
+    # replace <host.name> with the hostname of your synapse homeserver.
+    # You may need to specify a port (eg, :8448) if your server is not
+    # configured on port 443.
+    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
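If the update worked, that grep should print synapse's version banner, along
these lines (the version number is illustrative)::

    Server: Synapse/0.23.0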

Upgrading to v0.15.0
====================
@@ -77,7 +86,7 @@ It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::

    app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]

Where ``registration-01.yaml`` looks like::

    url: <String>  # e.g. "https://my.application.service.com"
@@ -166,7 +175,7 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.

The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of and room alias mappings.
@@ -175,18 +184,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is,
unfortunately, non trivial and requires human intervention to resolve any
resulting conflicts during the upgrade process.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:

    ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.

On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in the local HS will
automatically rejoin the room.

Upgrading to v0.4.0
@@ -245,7 +254,7 @@ automatically generate default config use::
        --config-path homeserver.config \
        --generate-config

This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::

    $ python synapse/app/homeserver.py --config-path homeserver.config
@@ -266,20 +275,20 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.

The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of and room alias mappings.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:

    ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.

On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in the local HS will
automatically rejoin the room.
contrib/prometheus/README — 20 lines (new file)
@@ -0,0 +1,20 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

  http://prometheus.io/

Then add a new job to the main prometheus.conf file:

  job: {
    name: "synapse"

    target_group: {
      target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
    }
  }

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.
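The ``job: { ... }`` stanza above is the old Prometheus 1.x configuration
syntax. For orientation, a sketch of the equivalent scrape config under the
YAML configuration used by later Prometheus releases (a translation, not part
of this file)::

    scrape_configs:
      - job_name: "synapse"
        metrics_path: "/_synapse/metrics"
        static_configs:
          - targets: ["SERVER.LOCATION.HERE:PORT"]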
contrib/prometheus/consoles/synapse.html — 395 lines (new file)
@@ -0,0 +1,395 @@
{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_maxrss"),
  expr: "process_psutil_rss:max",
  name: "Maxrss",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds{job='synapse'}",
  name: "FDs",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
  name: "time",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
  name: "time",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_pending_calls"),
  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
  name: "calls",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yTitle: "Pending Calls"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "rate(synapse_storage_query_time:count[2m])",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "rate(synapse_storage_transaction_time:count[2m])",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_msec"),
  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_schedule_time"),
  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
  name: "Total latency",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_ratio"),
  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
  name: "[[name]]",
  min: 0,
  max: 100,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: "Percentage"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_requests_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_requests_servlet"),
  expr: "rate(synapse_http_server_requests:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_requests_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
  expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "listeners",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "events",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
contrib/prometheus/synapse.rules — 21 lines (new file)
@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)

synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
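These recording rules use the Prometheus 1.x rule-file syntax. Under Prometheus
2.x the same rules would be written as YAML; a sketch of one of them for
orientation (a translation, not part of the diff)::

    groups:
      - name: synapse
        rules:
          - record: synapse_cache:hit_ratio_5m
            expr: rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])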
@@ -9,9 +9,10 @@ Description=Synapse Matrix homeserver
Type=simple
User=synapse
Group=synapse
+EnvironmentFile=-/etc/sysconfig/synapse
WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
+ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml

[Install]
WantedBy=multi-user.target
@@ -1,6 +1,8 @@
Using Postgres
--------------

+Postgres version 9.4 or later is known to work.
+
Set up database
===============
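To confirm what your database server reports, ``psql`` can show the version
directly (connection parameters are placeholders for your own setup)::

    psql -h localhost -U synapse_user -c "SHOW server_version;"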
@@ -17,6 +17,7 @@ export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \

@@ -15,5 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \

@@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \

@@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
scripts-dev/federation_client.py — 87 lines changed (mode changed: normal file → executable file)
@@ -1,10 +1,30 @@
#!/usr/bin/env python
#
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import nacl.signing
import json
import base64
import requests
import sys
import srvlookup

import yaml


def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""
@@ -120,11 +140,13 @@ def get_json(origin_name, origin_key, destination, path):
            origin_name, key, sig,
        )
        authorization_headers.append(bytes(header))
-        sys.stderr.write(header)
-        sys.stderr.write("\n")
+        print ("Authorization: %s" % header, file=sys.stderr)
+
+    dest = lookup(destination, path)
+    print ("Requesting %s" % dest, file=sys.stderr)

    result = requests.get(
-        lookup(destination, path),
+        dest,
        headers={"Authorization": authorization_headers[0]},
        verify=False,
    )
@@ -133,17 +155,66 @@ def get_json(origin_name, origin_key, destination, path):


def main():
-    origin_name, keyfile, destination, path = sys.argv[1:]
+    parser = argparse.ArgumentParser(
+        description=
+            "Signs and sends a federation request to a matrix homeserver",
+    )

-    with open(keyfile) as f:
+    parser.add_argument(
+        "-N", "--server-name",
+        help="Name to give as the local homeserver. If unspecified, will be "
+             "read from the config file.",
+    )
+
+    parser.add_argument(
+        "-k", "--signing-key-path",
+        help="Path to the file containing the private ed25519 key to sign the "
+             "request with.",
+    )
+
+    parser.add_argument(
+        "-c", "--config",
+        default="homeserver.yaml",
+        help="Path to server config file. Ignored if --server-name and "
+             "--signing-key-path are both given.",
+    )
+
+    parser.add_argument(
+        "-d", "--destination",
+        default="matrix.org",
+        help="name of the remote homeserver. We will do SRV lookups and "
+             "connect appropriately.",
+    )
+
+    parser.add_argument(
+        "path",
+        help="request path. We will add '/_matrix/federation/v1/' to this."
+    )
+
+    args = parser.parse_args()
+
+    if not args.server_name or not args.signing_key_path:
+        read_args_from_config(args)
+
+    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]

    result = get_json(
-        origin_name, key, destination, "/_matrix/federation/v1/" + path
+        args.server_name, key, args.destination, "/_matrix/federation/v1/" + args.path
    )

    json.dump(result, sys.stdout)
-    print ""
+    print ("")
+
+
+def read_args_from_config(args):
+    with open(args.config, 'r') as fh:
+        config = yaml.safe_load(fh)
+        if not args.server_name:
+            args.server_name = config['server_name']
+        if not args.signing_key_path:
+            args.signing_key_path = config['signing_key_path']


if __name__ == "__main__":
    main()
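For illustration, the new interface can be exercised like this (the server name
and key path are placeholders; ``version`` becomes
``/_matrix/federation/v1/version``)::

    ./federation_client.py -N example.com -k signing.key -d matrix.org version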
@@ -41,6 +41,7 @@ BOOLEAN_COLUMNS = {
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
    "device_lists_outbound_pokes": ["sent"],
+    "users_who_share_rooms": ["share_private"],
}

@@ -121,7 +122,7 @@ class Store(object):
        try:
            txn = conn.cursor()
            return func(
-                LoggingTransaction(txn, desc, self.database_engine, []),
+                LoggingTransaction(txn, desc, self.database_engine, [], []),
                *args, **kwargs
            )
        except self.database_engine.module.DatabaseError as e:

@@ -251,6 +252,25 @@ class Porter(object):
            )
            return

+        if table in (
+            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "users_in_pubic_room",
+        ):
+            # We don't port these tables, as they're a faff and we can regenerate
+            # them anyway.
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        if table == "user_directory_stream_pos":
+            # We need to make sure there is a single row, `(X, null)`, as that is
+            # what synapse expects to be there.
+            yield self.postgres_store._simple_insert(
+                table=table,
+                values={"stream_id": None},
+            )
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
        forward_select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

-__version__ = "0.21.0"
+__version__ = "0.23.0"
@@ -23,7 +23,8 @@ from synapse import event_auth
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes
from synapse.types import UserID
-from synapse.util import logcontext
+from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
+from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)

@@ -39,6 +40,10 @@ AuthEventTypes = (
GUEST_DEVICE_ID = "guest_device"


+class _InvalidMacaroonException(Exception):
+    pass
+
+
class Auth(object):
    """
    FIXME: This class contains a mix of functions for authenticating users

@@ -51,6 +56,9 @@ class Auth(object):
        self.state = hs.get_state_handler()
        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401

+        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
+        register_cache("token_cache", self.token_cache)
+
    @defer.inlineCallbacks
    def check_from_context(self, event, context, do_sig_check=True):
        auth_events_ids = yield self.compute_auth_events(
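A sizing note on the new ``token_cache`` (assuming ``CACHE_SIZE_FACTOR`` tracks
the ``SYNAPSE_CACHE_FACTOR`` environment variable described in the README):
with the new default factor of 0.5, ``CACHE_SIZE_FACTOR * 10000`` works out to
a 5,000-entry LRU cache, and the size scales linearly with the factor.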
@@ -144,17 +152,8 @@ class Auth(object):
    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
        with Measure(self.clock, "check_host_in_room"):
-            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
-
-            logger.debug("calling resolve_state_groups from check_host_in_room")
-            entry = yield self.state.resolve_state_groups(
-                room_id, latest_event_ids
-            )
-
-            ret = yield self.store.is_host_joined(
-                room_id, host, entry.state_group, entry.state
-            )
-            defer.returnValue(ret)
+            latest_event_ids = yield self.store.is_host_joined(room_id, host)
+            defer.returnValue(latest_event_ids)

    def _check_joined_room(self, member, user_id, room_id):
        if not member or member.membership != Membership.JOIN:

@@ -209,8 +208,8 @@ class Auth(object):
            default=[""]
        )[0]
        if user and access_token and ip_addr:
-            logcontext.preserve_fn(self.store.insert_client_ip)(
-                user=user,
+            self.store.insert_client_ip(
+                user_id=user.to_string(),
                access_token=access_token,
                ip=ip_addr,
                user_agent=user_agent,

@@ -276,8 +275,8 @@ class Auth(object):
            AuthError if no user by that token exists or the token is invalid.
        """
        try:
-            macaroon = pymacaroons.Macaroon.deserialize(token)
-        except Exception:  # deserialize can throw more-or-less anything
+            user_id, guest = self._parse_and_validate_macaroon(token, rights)
+        except _InvalidMacaroonException:
            # doesn't look like a macaroon: treat it as an opaque token which
            # must be in the database.
            # TODO: it would be nice to get rid of this, but apparently some

@@ -286,19 +285,8 @@ class Auth(object):
            defer.returnValue(r)

        try:
-            user_id = self.get_user_id_from_macaroon(macaroon)
            user = UserID.from_string(user_id)

-            self.validate_macaroon(
-                macaroon, rights, self.hs.config.expire_access_token,
-                user_id=user_id,
-            )
-
-            guest = False
-            for caveat in macaroon.caveats:
-                if caveat.caveat_id == "guest = true":
-                    guest = True
-
            if guest:
                # Guest access tokens are not stored in the database (there can
                # only be one access token per guest, anyway).
@@ -370,6 +358,55 @@ class Auth(object):
|
||||
errcode=Codes.UNKNOWN_TOKEN
|
||||
)
|
||||
|
||||
def _parse_and_validate_macaroon(self, token, rights="access"):
|
||||
"""Takes a macaroon and tries to parse and validate it. This is cached
|
||||
if and only if rights == access and there isn't an expiry.
|
||||
|
||||
On invalid macaroon raises _InvalidMacaroonException
|
||||
|
||||
Returns:
|
||||
(user_id, is_guest)
|
||||
"""
|
||||
if rights == "access":
|
||||
cached = self.token_cache.get(token, None)
|
||||
if cached:
|
||||
return cached
|
||||
|
||||
try:
|
||||
macaroon = pymacaroons.Macaroon.deserialize(token)
|
||||
except Exception: # deserialize can throw more-or-less anything
|
||||
# doesn't look like a macaroon: treat it as an opaque token which
|
||||
# must be in the database.
|
||||
# TODO: it would be nice to get rid of this, but apparently some
|
||||
# people use access tokens which aren't macaroons
|
||||
raise _InvalidMacaroonException()
|
||||
|
||||
try:
|
||||
user_id = self.get_user_id_from_macaroon(macaroon)
|
||||
|
||||
has_expiry = False
|
||||
guest = False
|
||||
for caveat in macaroon.caveats:
|
||||
if caveat.caveat_id.startswith("time "):
|
||||
has_expiry = True
|
||||
elif caveat.caveat_id == "guest = true":
|
||||
guest = True
|
||||
|
||||
self.validate_macaroon(
|
||||
macaroon, rights, self.hs.config.expire_access_token,
|
||||
user_id=user_id,
|
||||
)
|
||||
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
|
||||
raise AuthError(
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
|
||||
errcode=Codes.UNKNOWN_TOKEN
|
||||
)
|
||||
|
||||
if not has_expiry and rights == "access":
|
||||
self.token_cache[token] = (user_id, guest)
|
||||
|
||||
return user_id, guest
|
||||
|
||||
def get_user_id_from_macaroon(self, macaroon):
|
||||
"""Retrieve the user_id given by the caveats on the macaroon.
|
||||
|
||||
@@ -482,6 +519,14 @@ class Auth(object):
|
||||
)
|
||||
|
||||
def is_server_admin(self, user):
|
||||
""" Check if the given user is a local server admin.
|
||||
|
||||
Args:
|
||||
user (str): mxid of user to check
|
||||
|
||||
Returns:
|
||||
bool: True if the user is an admin
|
||||
"""
|
||||
return self.store.is_server_admin(user)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
|
||||
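The subtle part of the new `_parse_and_validate_macaroon` is the cacheability rule: a macaroon carrying a `time` caveat can expire, so a cached "valid" verdict could go stale, and only expiry-free access tokens are memoised in the new `token_cache`. A minimal standalone sketch of the caveat scan, assuming nothing beyond the standard pymacaroons API:

    import pymacaroons

    def scan_caveats(serialized_token):
        """Classify the two caveats the auth code cares about."""
        macaroon = pymacaroons.Macaroon.deserialize(serialized_token)
        has_expiry = False
        guest = False
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith("time "):
                has_expiry = True   # token can expire: unsafe to cache
            elif caveat.caveat_id == "guest = true":
                guest = True        # guest tokens are never in the database
        return has_expiry, guest
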
synapse/app/_base.py (new file, 99 lines)
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging

import affinity
from daemonize import Daemonize
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from twisted.internet import reactor


def start_worker_reactor(appname, config):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor. Pulls configuration from the 'worker' settings in 'config'.

    Args:
        appname (str): application name which will be sent to syslog
        config (synapse.config.Config): config object
    """

    logger = logging.getLogger(config.worker_app)

    start_reactor(
        appname,
        config.soft_file_limit,
        config.gc_thresholds,
        config.worker_pid_file,
        config.worker_daemonize,
        config.worker_cpu_affinity,
        logger,
    )


def start_reactor(
        appname,
        soft_file_limit,
        gc_thresholds,
        pid_file,
        daemonize,
        cpu_affinity,
        logger,
):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor

    Args:
        appname (str): application name which will be sent to syslog
        soft_file_limit (int):
        gc_thresholds:
        pid_file (str): name of pid file to write to if daemonize is True
        daemonize (bool): true to run the reactor in a background process
        cpu_affinity (int|None): cpu affinity mask
        logger (logging.Logger): logger instance to pass to Daemonize
    """

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            if cpu_affinity is not None:
                logger.info("Setting CPU affinity to %s" % cpu_affinity)
                affinity.set_process_affinity_mask(0, cpu_affinity)
            change_resource_limit(soft_file_limit)
            if gc_thresholds:
                gc.set_threshold(*gc_thresholds)
            reactor.run()

    if daemonize:
        daemon = Daemonize(
            app=appname,
            pid=pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
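Every worker diff below converges on the same shape: set up the slaved HomeServer, register post-start callbacks, then hand everything else to this new helper. A minimal sketch of the resulting pattern, with `ps` standing in for any already-constructed worker HomeServer and "synapse-example-worker" a made-up app name:

    from twisted.internet import reactor

    from synapse.app import _base

    def start_worker(ps, config):
        ps.setup()
        ps.start_listening(config.worker_listeners)

        def on_reactor_start():
            ps.get_datastore().start_profiling()
            ps.get_state_handler().start_caching()

        reactor.callWhenRunning(on_reactor_start)

        # Daemonizing, rlimits, gc thresholds and CPU affinity all live in
        # one place now instead of being copy-pasted into each worker.
        _base.start_worker_reactor("synapse-example-worker", config)
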
@@ -13,38 +13,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse.server import HomeServer
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.appservice")


@@ -181,36 +174,13 @@ def start(config_options):
    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-appservice",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-appservice", config)


if __name__ == '__main__':

@@ -13,47 +13,39 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.client_reader")


@@ -65,8 +57,8 @@ class ClientReaderSlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    TransactionStore,
    SlavedClientIpStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    pass

@@ -183,36 +175,13 @@ def start(config_options):
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-client-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-client-reader", config)


if __name__ == '__main__':

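The store change here (`ClientIpStore` out, `SlavedClientIpStore` in) also removes the old ordering hack: these stores are composed by cooperative multiple inheritance, and a base whose `__init__` had a different signature had to be appended after `BaseSlavedStore`. A toy model of the constraint, not the real class bodies:

    class BaseSlavedStore(object):
        def __init__(self, db_conn, hs):
            super(BaseSlavedStore, self).__init__()

    class SlavedClientIpStore(BaseSlavedStore):
        # Same (db_conn, hs) signature as every other slaved mixin, so it
        # joins the normal cooperative super() chain.
        def __init__(self, db_conn, hs):
            super(SlavedClientIpStore, self).__init__(db_conn, hs)

    class ClientReaderSlavedStore(SlavedClientIpStore, BaseSlavedStore):
        pass  # no "after BaseSlavedStore" ordering workaround needed

    store = ClientReaderSlavedStore(db_conn=None, hs=None)  # illustrative only
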
@@ -13,44 +13,36 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import FEDERATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.federation_reader")


@@ -172,36 +164,13 @@ def start(config_options):
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-federation-reader", config)


if __name__ == '__main__':

@@ -13,45 +13,38 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse.server import HomeServer
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.federation import send_queue
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.appservice")
logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
@@ -213,36 +206,12 @@ def start(config_options):
    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-sender",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-federation-sender", config)


class FederationSenderHandler(object):

synapse/app/frontend_proxy.py (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.frontend_proxy")


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
                                  releases=())

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri,
                body,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))


class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
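The servlet above is the frontend proxy's whole contract in miniature: writes are forwarded to the main process named by the `worker_main_http_uri` setting (workers cannot write to the database), while the empty-body "how many one-time keys are left" query is answered from the local replica. The same split, condensed into a sketch with illustrative parameter names:

    from twisted.internet import defer

    from synapse.http.servlet import parse_json_object_from_request

    @defer.inlineCallbacks
    def handle_key_upload(http_client, store, main_uri, request, user_id, device_id):
        body = parse_json_object_from_request(request)
        if body:
            # A real upload: only the main synapse may write, so proxy it on.
            result = yield http_client.post_json_get_json(
                main_uri + request.uri, body,
            )
        else:
            # An empty upload is just a count query: serve it locally.
            counts = yield store.count_e2e_one_time_keys(user_id, device_id)
            result = {"one_time_key_counts": counts}
        defer.returnValue((200, result))
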
@@ -13,61 +13,48 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

import gc
import logging
import os
import sys

import synapse
import synapse.config.logger
from synapse import events
from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
    STATIC_PREFIX, WEB_CLIENT_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError

from synapse.python_dependencies import (
    check_requirements, CONDITIONAL_REQUIREMENTS
)

from synapse.rest import ClientRestResource
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database

from synapse.server import HomeServer

from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import GzipEncoderFactory
from synapse.http.server import RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.api.urls import (
    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
    SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.metrics import register_memory_metrics, get_metrics_for
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.federation.transport.server import TransportLayerServer

from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import register_memory_metrics
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
    check_requirements
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage import are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole

from synapse.http.site import SynapseSite

from synapse import events

from daemonize import Daemonize
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, Resource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File

logger = logging.getLogger("synapse.app.homeserver")

@@ -398,7 +385,8 @@ def run(hs):
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    start_time = hs.get_clock().time()
    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

@@ -410,41 +398,23 @@ def run(hs):
        if uptime < 0:
            uptime = 0

        # If the stats directory is empty then this is the first time we've
        # reported stats.
        first_time = not stats

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages
        else:
            stats.pop("daily_messages", None)
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        if first_time:
            # Add callbacks to report the synapse stats as metrics whenever
            # prometheus requests them, typically every 30s.
            # As some of the stats are expensive to calculate we only update
            # them when synapse phones home to matrix.org every 24 hours.
            metrics = get_metrics_for("synapse.usage")
            metrics.add_callback("timestamp", lambda: stats["timestamp"])
            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
            metrics.add_callback("total_users", lambda: stats["total_users"])
            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
            metrics.add_callback(
                "daily_active_users", lambda: stats["daily_active_users"]
            )
            metrics.add_callback(
                "daily_messages", lambda: stats.get("daily_messages", 0)
            )
        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
@@ -456,41 +426,25 @@ def run(hs):
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        phone_home_task = task.LoopingCall(phone_stats_home)
        logger.info("Scheduling stats reporting for 24 hour intervals")
        phone_home_task.start(60 * 60 * 24, now=False)
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)
        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, phone_stats_home)

        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            change_resource_limit(hs.config.soft_file_limit)
            if hs.config.gc_thresholds:
                gc.set_threshold(*hs.config.gc_thresholds)
            reactor.run()
    if hs.config.daemonize and hs.config.print_pidfile:
        print (hs.config.pid_file)

    if hs.config.daemonize:

        if hs.config.print_pidfile:
            print (hs.config.pid_file)

        daemon = Daemonize(
            app="synapse-homeserver",
            pid=hs.config.pid_file,
            action=lambda: in_thread(),
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )

        daemon.start()
    else:
        in_thread()
    _base.start_reactor(
        "synapse-homeserver",
        hs.config.soft_file_limit,
        hs.config.gc_thresholds,
        hs.config.pid_file,
        hs.config.daemonize,
        hs.config.cpu_affinity,
        logger,
    )


def main():

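The scheduling change is easy to misread because the two clock helpers use different units: in synapse's `Clock`, `looping_call` takes its interval in milliseconds while `call_later` takes seconds. The new phone-home schedule, reduced to its two lines:

    clock = hs.get_clock()

    # Report every 3 hours (interval given in milliseconds)...
    clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

    # ...and once after 5 minutes (delay given in seconds), since the server
    # is typically too busy right at startup to report meaningful numbers.
    clock.call_later(5 * 60, phone_stats_home)
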
@@ -13,57 +13,49 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse import events
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    TransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
    ClientIpStore,
):
    pass

@@ -180,36 +172,13 @@ def start(config_options):
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-media-repository",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-media-repository", config)


if __name__ == '__main__':

@@ -13,41 +13,33 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse

from synapse.server import HomeServer
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.storage.engines import create_engine
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn, \
    PreserveLoggingContext
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.pusher")


@@ -244,18 +236,6 @@ def start(config_options):
    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
@@ -263,18 +243,7 @@ def start(config_options):

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-pusher",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-pusher", config)


if __name__ == '__main__':

@@ -13,56 +13,50 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import sys

import synapse

from synapse.api.constants import EventTypes
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import contextlib
import gc

logger = logging.getLogger("synapse.app.synchrotron")


@@ -77,9 +71,9 @@ class SynchrotronSlavedStore(
    SlavedPresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedClientIpStore,
    RoomStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
@@ -440,36 +434,13 @@ def start(config_options):
    ss.setup()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_datastore().start_profiling()
        ss.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-synchrotron",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
    _base.start_worker_reactor("synapse-synchrotron", config)


if __name__ == '__main__':

synapse/app/user_dir.py (new file, 241 lines)
@@ -0,0 +1,241 @@
|
||||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha import user_directory
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.user_dir")


class UserDirectorySlaveStore(
    SlavedEventStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    UserDirectoryStore,
    BaseSlavedStore,
):
    def __init__(self, db_conn, hs):
        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
            db_conn, "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream id with events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )

        self._current_state_delta_pos = events_max

    def stream_positions(self):
        result = super(UserDirectorySlaveStore, self).stream_positions()
        result["current_state_deltas"] = self._current_state_delta_pos
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "current_state_deltas":
            self._current_state_delta_pos = token
            for row in rows:
                self._curr_state_delta_stream_cache.entity_has_changed(
                    row.room_id, token
                )
        return super(UserDirectorySlaveStore, self).process_replication_rows(
            stream_name, token, rows
        )


class UserDirectoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    user_directory.register_servlets(self, resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse user_dir now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return UserDirectoryReplicationHandler(self)


class UserDirectoryReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    def on_rdata(self, stream_name, token, rows):
        super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == "current_state_deltas":
            preserve_fn(self.user_directory.notify_new_event)()


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse user directory", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.user_dir"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.update_user_directory:
        sys.stderr.write(
            "\nThe update_user_directory must be disabled in the main synapse process"
            "\nbefore it can be run in a separate worker."
            "\nPlease add ``update_user_directory: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the user directory updater to start, since it is disabled in the
    # main config
    config.update_user_directory = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = UserDirectoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-user-dir", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -241,6 +241,16 @@ class ApplicationService(object):
    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def get_exlusive_user_regexes(self):
        """Get the list of regexes used to determine if a user is exclusively
        registered by the AS
        """
        return [
            regex_obj["regex"]
            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
            if regex_obj["exclusive"]
        ]

    def is_rate_limited(self):
        return self.rate_limited
@@ -33,6 +33,8 @@ from .jwt import JWTConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .emailconfig import EmailConfig
from .workers import WorkerConfig
from .push import PushConfig
from .spam_checker import SpamCheckerConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
@@ -40,7 +42,8 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig,):
                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
                       SpamCheckerConfig,):
    pass
@@ -15,13 +15,15 @@

from ._base import Config, ConfigError

import importlib
from synapse.util.module_loader import load_module


class PasswordAuthProviderConfig(Config):
    def read_config(self, config):
        self.password_providers = []

        provider_config = None

        # We want to be backwards compatible with the old `ldap_config`
        # param.
        ldap_config = config.get("ldap_config", {})
@@ -38,19 +40,15 @@ class PasswordAuthProviderConfig(Config):
            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
                from ldap_auth_provider import LdapAuthProvider
                provider_class = LdapAuthProvider
                try:
                    provider_config = provider_class.parse_config(provider["config"])
                except Exception as e:
                    raise ConfigError(
                        "Failed to parse config for %r: %r" % (provider['module'], e)
                    )
            else:
                # We need to import the module, and then pick the class out of
                # that, so we split based on the last dot.
                module, clz = provider['module'].rsplit(".", 1)
                module = importlib.import_module(module)
                provider_class = getattr(module, clz)
                (provider_class, provider_config) = load_module(provider)

            try:
                provider_config = provider_class.parse_config(provider["config"])
            except Exception as e:
                raise ConfigError(
                    "Failed to parse config for %r: %r" % (provider['module'], e)
                )
            self.password_providers.append((provider_class, provider_config))

    def default_config(self, **kwargs):
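The load_module helper itself is not part of this diff; a plausible sketch, assuming it simply centralizes the import-and-parse logic removed above (so treat the details as illustrative):

import importlib

from synapse.config._base import ConfigError


def load_module(provider):
    # provider looks like {"module": "some.package.SomeClass", "config": {...}}.
    # We need to import the module, and then pick the class out of that, so
    # we split based on the last dot.
    module_name, clz = provider['module'].rsplit(".", 1)
    module = importlib.import_module(module_name)
    provider_class = getattr(module, clz)

    try:
        provider_config = provider_class.parse_config(provider["config"])
    except Exception as e:
        raise ConfigError(
            "Failed to parse config for %r: %r" % (provider['module'], e)
        )

    return provider_class, provider_config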
45
synapse/config/push.py
Normal file
@@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class PushConfig(Config):
    def read_config(self, config):
        self.push_redact_content = False

        push_config = config.get("email", {})
        self.push_redact_content = push_config.get("redact_content", False)

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Control how push messages are sent to google/apple for notifications.
        # Normally every message sent in a room with one or more people using
        # mobile devices will be posted to a push server hosted by matrix.org
        # which is registered with google and apple in order to allow push
        # notifications to be sent to these mobile devices.
        #
        # Setting redact_content to true will make the push messages contain no
        # message content, which will provide increased privacy. This is a
        # temporary solution pending improvements to Android and iPhone apps
        # to get content from the app rather than the notification.
        #
        # For modern android devices the notification content will still appear
        # because it is loaded by the app. iPhone, however, will send a
        # notification saying only that a message arrived and who it came from.
        #
        #push:
        #   redact_content: false
        """
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,14 +30,25 @@ class ServerConfig(Config):
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")
        self.cpu_affinity = config.get("cpu_affinity")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get(
            "block_non_admin_invites", False,
        )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
                self.public_baseurl += '/'
@@ -143,6 +155,27 @@ class ServerConfig(Config):
        # When running as a daemon, the file to store the pid in
        pid_file: %(pid_file)s

        # CPU affinity mask. Setting this restricts the CPUs on which the
        # process will be scheduled. It is represented as a bitmask, with the
        # lowest order bit corresponding to the first logical CPU and the
        # highest order bit corresponding to the last logical CPU. Not all CPUs
        # may exist on a given system but a mask may specify more CPUs than are
        # present.
        #
        # For example:
        #    0x00000001  is processor #0,
        #    0x00000003  is processors #0 and #1,
        #    0xFFFFFFFF  is all processors (#0 through #31).
        #
        # Pinning a Python process to a single CPU is desirable, because Python
        # is inherently single-threaded due to the GIL, and can suffer a
        # 30-40%% slowdown due to cache blow-out and thread context switching
        # if the scheduler happens to schedule the underlying threads across
        # different cores. See
        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
        #
        # cpu_affinity: 0xFFFFFFFF

        # Whether to serve a web client from the HTTP/HTTPS root resource.
        web_client: True
@@ -167,6 +200,10 @@ class ServerConfig(Config):
        # and sync operations. The default value is -1, meaning no upper limit.
        # filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        # block_non_admin_invites: True

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        listeners:
35
synapse/config/spam_checker.py
Normal file
@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.module_loader import load_module

from ._base import Config


class SpamCheckerConfig(Config):
    def read_config(self, config):
        self.spam_checker = None

        provider = config.get("spam_checker", None)
        if provider is not None:
            self.spam_checker = load_module(provider)

    def default_config(self, **kwargs):
        return """\
        # spam_checker:
        #     module: "my_custom_project.SuperSpamChecker"
        #     config:
        #         example_option: 'things'
        """
@@ -32,6 +32,9 @@ class WorkerConfig(Config):
        self.worker_replication_port = config.get("worker_replication_port", None)
        self.worker_name = config.get("worker_name", self.worker_app)

        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
        self.worker_cpu_affinity = config.get("worker_cpu_affinity")

        if self.worker_listeners:
            for listener in self.worker_listeners:
                bind_address = listener.pop("bind_address", None)
@@ -13,14 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.


from synapse.util import logcontext
from twisted.web.http import HTTPClient
from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util.logcontext import (
    preserve_context_over_fn, preserve_context_over_deferred
)
import simplejson as json
import logging

@@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

    for i in range(5):
        try:
            protocol = yield preserve_context_over_fn(
                endpoint.connect, factory
            )
            server_response, server_certificate = yield preserve_context_over_deferred(
                protocol.remote_key
            )
            defer.returnValue((server_response, server_certificate))
            return
            with logcontext.PreserveLoggingContext():
                protocol = yield endpoint.connect(factory)
                server_response, server_certificate = yield protocol.remote_key
                defer.returnValue((server_response, server_certificate))
        except SynapseKeyClientError as e:
            logger.exception("Error getting key for %r" % (server_name,))
            if e.status.startswith("4"):
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,10 +16,9 @@

from synapse.crypto.keyclient import fetch_server_key
from synapse.api.errors import SynapseError, Codes
from synapse.util import unwrapFirstError
from synapse.util.async import ObservableDeferred
from synapse.util import unwrapFirstError, logcontext
from synapse.util.logcontext import (
    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
    PreserveLoggingContext,
    preserve_fn
)
from synapse.util.metrics import Measure
@@ -57,7 +57,8 @@ Attributes:
    json_object(dict): The JSON object to verify.
    deferred(twisted.internet.defer.Deferred):
        A deferred (server_name, key_id, verify_key) tuple that resolves when
        a verify key has been fetched
        a verify key has been fetched. The deferreds' callbacks are run with no
        logcontext.
"""
@@ -74,23 +75,32 @@ class Keyring(object):
        self.perspective_servers = self.config.perspectives
        self.hs = hs

        # map from server name to Deferred. Has an entry for each server with
        # an ongoing key download; the Deferred completes once the download
        # completes.
        #
        # These are regular, logcontext-agnostic Deferreds.
        self.key_downloads = {}

    def verify_json_for_server(self, server_name, json_object):
        return self.verify_json_objects_for_server(
            [(server_name, json_object)]
        )[0]
        return logcontext.make_deferred_yieldable(
            self.verify_json_objects_for_server(
                [(server_name, json_object)]
            )[0]
        )

    def verify_json_objects_for_server(self, server_and_json):
        """Bulk verfies signatures of json objects, bulk fetching keys as
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

        Args:
            server_and_json (list): List of pairs of (server_name, json_object)

        Returns:
            list of deferreds indicating success or failure to verify each
            json object's signature for the given server_name.
            List<Deferred>: for each input pair, a deferred indicating success
                or failure to verify each json object's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
        """
        verify_requests = []
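As the new docstring notes, these deferreds fire in the sentinel logcontext, so a caller that yields on one of them should restore its own logcontext first. A minimal sketch of the expected calling pattern, mirroring what the new verify_json_for_server does above:

from twisted.internet import defer

from synapse.util import logcontext


@defer.inlineCallbacks
def verify_first(keyring, server_name, json_object):
    # The deferred runs its callbacks in the sentinel logcontext, so wrap it
    # in make_deferred_yieldable to re-enter our logcontext when it fires.
    d = keyring.verify_json_objects_for_server(
        [(server_name, json_object)]
    )[0]
    yield logcontext.make_deferred_yieldable(d)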
@@ -117,94 +127,72 @@ class Keyring(object):

            verify_requests.append(verify_request)

        @defer.inlineCallbacks
        def handle_key_deferred(verify_request):
            server_name = verify_request.server_name
            try:
                _, key_id, verify_key = yield verify_request.deferred
            except IOError as e:
                logger.warn(
                    "Got IOError when downloading keys for %s: %s %s",
                    server_name, type(e).__name__, str(e.message),
                )
                raise SynapseError(
                    502,
                    "Error downloading keys for %s" % (server_name,),
                    Codes.UNAUTHORIZED,
                )
            except Exception as e:
                logger.exception(
                    "Got Exception when downloading keys for %s: %s %s",
                    server_name, type(e).__name__, str(e.message),
                )
                raise SynapseError(
                    401,
                    "No key for %s with id %s" % (server_name, key_ids),
                    Codes.UNAUTHORIZED,
                )

            json_object = verify_request.json_object

            logger.debug("Got key %s %s:%s for server %s, verifying" % (
                key_id, verify_key.alg, verify_key.version, server_name,
            ))
            try:
                verify_signed_json(json_object, server_name, verify_key)
            except:
                raise SynapseError(
                    401,
                    "Invalid signature for server %s with key %s:%s" % (
                        server_name, verify_key.alg, verify_key.version
                    ),
                    Codes.UNAUTHORIZED,
                )

        server_to_deferred = {
            server_name: defer.Deferred()
            for server_name, _ in server_and_json
        }

        with PreserveLoggingContext():

            # We want to wait for any previous lookups to complete before
            # proceeding.
            wait_on_deferred = self.wait_for_previous_lookups(
                [server_name for server_name, _ in server_and_json],
                server_to_deferred,
            )

            # Actually start fetching keys.
            wait_on_deferred.addBoth(
                lambda _: self.get_server_verify_keys(verify_requests)
            )

            # When we've finished fetching all the keys for a given server_name,
            # resolve the deferred passed to `wait_for_previous_lookups` so that
            # any lookups waiting will proceed.
            server_to_request_ids = {}

            def remove_deferreds(res, server_name, verify_request):
                request_id = id(verify_request)
                server_to_request_ids[server_name].discard(request_id)
                if not server_to_request_ids[server_name]:
                    d = server_to_deferred.pop(server_name, None)
                    if d:
                        d.callback(None)
                return res

            for verify_request in verify_requests:
                server_name = verify_request.server_name
                request_id = id(verify_request)
                server_to_request_ids.setdefault(server_name, set()).add(request_id)
                deferred.addBoth(remove_deferreds, server_name, verify_request)
        preserve_fn(self._start_key_lookups)(verify_requests)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified
        handle = preserve_fn(_handle_key_deferred)
        return [
            preserve_context_over_fn(handle_key_deferred, verify_request)
            for verify_request in verify_requests
            handle(rq) for rq in verify_requests
        ]
    @defer.inlineCallbacks
    def _start_key_lookups(self, verify_requests):
        """Sets off the key fetches for each verify request

        Once each fetch completes, verify_request.deferred will be resolved.

        Args:
            verify_requests (List[VerifyKeyRequest]):
        """

        # create a deferred for each server we're going to look up the keys
        # for; we'll resolve them once we have completed our lookups.
        # These will be passed into wait_for_previous_lookups to block
        # any other lookups until we have finished.
        # The deferreds are called with no logcontext.
        server_to_deferred = {
            rq.server_name: defer.Deferred()
            for rq in verify_requests
        }

        # We want to wait for any previous lookups to complete before
        # proceeding.
        yield self.wait_for_previous_lookups(
            [rq.server_name for rq in verify_requests],
            server_to_deferred,
        )

        # Actually start fetching keys.
        self._get_server_verify_keys(verify_requests)

        # When we've finished fetching all the keys for a given server_name,
        # resolve the deferred passed to `wait_for_previous_lookups` so that
        # any lookups waiting will proceed.
        #
        # map from server name to a set of request ids
        server_to_request_ids = {}

        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)

        def remove_deferreds(res, verify_request):
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids[server_name].discard(request_id)
            if not server_to_request_ids[server_name]:
                d = server_to_deferred.pop(server_name, None)
                if d:
                    d.callback(None)
            return res

        for verify_request in verify_requests:
            verify_request.deferred.addBoth(
                remove_deferreds, verify_request,
            )

    @defer.inlineCallbacks
    def wait_for_previous_lookups(self, server_names, server_to_deferred):
        """Waits for any previous key lookups for the given servers to finish.
@@ -212,7 +200,13 @@ class Keyring(object):

        Args:
            server_names (list): list of server_names we want to lookup
            server_to_deferred (dict): server_name to deferred which gets
                resolved once we've finished looking up keys for that server
                resolved once we've finished looking up keys for that server.
                The Deferreds should be regular twisted ones which call their
                callbacks with no logcontext.

        Returns: a Deferred which resolves once all key lookups for the given
            servers have completed. Follows the synapse rules of logcontext
            preservation.
        """
        while True:
            wait_on = [
@@ -226,17 +220,15 @@ class Keyring(object):
            else:
                break

        def rm(r, server_name_):
            self.key_downloads.pop(server_name_, None)
            return r

        for server_name, deferred in server_to_deferred.items():
            d = ObservableDeferred(preserve_context_over_deferred(deferred))
            self.key_downloads[server_name] = d
            self.key_downloads[server_name] = deferred
            deferred.addBoth(rm, server_name)

            def rm(r, server_name):
                self.key_downloads.pop(server_name, None)
                return r

            d.addBoth(rm, server_name)

    def get_server_verify_keys(self, verify_requests):
    def _get_server_verify_keys(self, verify_requests):
        """Tries to find at least one key for each verify request

        For each verify_request, verify_request.deferred is called back with
@@ -305,21 +297,23 @@ class Keyring(object):
                if not missing_keys:
                    break

            for verify_request in requests_missing_keys.values():
                verify_request.deferred.errback(SynapseError(
                    401,
                    "No key for %s with id %s" % (
                        verify_request.server_name, verify_request.key_ids,
                    ),
                    Codes.UNAUTHORIZED,
                ))
            with PreserveLoggingContext():
                for verify_request in requests_missing_keys:
                    verify_request.deferred.errback(SynapseError(
                        401,
                        "No key for %s with id %s" % (
                            verify_request.server_name, verify_request.key_ids,
                        ),
                        Codes.UNAUTHORIZED,
                    ))

        def on_err(err):
            for verify_request in verify_requests:
                if not verify_request.deferred.called:
                    verify_request.deferred.errback(err)
            with PreserveLoggingContext():
                for verify_request in verify_requests:
                    if not verify_request.deferred.called:
                        verify_request.deferred.errback(err)

        do_iterations().addErrback(on_err)
        preserve_fn(do_iterations)().addErrback(on_err)

    @defer.inlineCallbacks
    def get_keys_from_store(self, server_name_and_key_ids):
@@ -333,7 +327,7 @@ class Keyring(object):
            Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
                server_name -> key_id -> VerifyKey
        """
        res = yield preserve_context_over_deferred(defer.gatherResults(
        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store.get_server_verify_keys)(
                    server_name, key_ids
@@ -341,7 +335,7 @@ class Keyring(object):
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        defer.returnValue(dict(res))
@@ -362,13 +356,13 @@ class Keyring(object):
            )
            defer.returnValue({})

        results = yield preserve_context_over_deferred(defer.gatherResults(
        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(get_key)(p_name, p_keys)
                for p_name, p_keys in self.perspective_servers.items()
            ],
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        union_of_keys = {}
        for result in results:
@@ -402,13 +396,13 @@ class Keyring(object):

        defer.returnValue(keys)

        results = yield preserve_context_over_deferred(defer.gatherResults(
        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(get_key)(server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        merged = {}
        for result in results:
@@ -485,7 +479,7 @@ class Keyring(object):
        for server_name, response_keys in processed_response.items():
            keys.setdefault(server_name, {}).update(response_keys)

        yield preserve_context_over_deferred(defer.gatherResults(
        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store_keys)(
                    server_name=server_name,
@@ -495,7 +489,7 @@ class Keyring(object):
                for server_name, response_keys in keys.items()
            ],
            consumeErrors=True
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        defer.returnValue(keys)

@@ -543,7 +537,7 @@ class Keyring(object):

        keys.update(response_keys)

        yield preserve_context_over_deferred(defer.gatherResults(
        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store_keys)(
                    server_name=key_server_name,
@@ -553,7 +547,7 @@ class Keyring(object):
                for key_server_name, verify_keys in keys.items()
            ],
            consumeErrors=True
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        defer.returnValue(keys)

@@ -619,7 +613,7 @@ class Keyring(object):
        response_keys.update(verify_keys)
        response_keys.update(old_verify_keys)

        yield preserve_context_over_deferred(defer.gatherResults(
        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store.store_server_keys_json)(
                    server_name=server_name,
@@ -632,7 +626,7 @@ class Keyring(object):
                for key_id in updated_key_ids
            ],
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        results[server_name] = response_keys

@@ -710,7 +704,6 @@ class Keyring(object):

        defer.returnValue(verify_keys)

    @defer.inlineCallbacks
    def store_keys(self, server_name, from_server, verify_keys):
        """Store a collection of verify keys for a given server
        Args:
@@ -721,7 +714,7 @@ class Keyring(object):
            A deferred that completes when the keys are stored.
        """
        # TODO(markjh): Store whether the keys have expired.
        yield preserve_context_over_deferred(defer.gatherResults(
        return logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store.store_server_verify_key)(
                    server_name, server_name, key.time_added, key
@@ -729,4 +722,48 @@ class Keyring(object):
                for key_id, key in verify_keys.items()
            ],
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))
@defer.inlineCallbacks
def _handle_key_deferred(verify_request):
    server_name = verify_request.server_name
    try:
        with PreserveLoggingContext():
            _, key_id, verify_key = yield verify_request.deferred
    except IOError as e:
        logger.warn(
            "Got IOError when downloading keys for %s: %s %s",
            server_name, type(e).__name__, str(e.message),
        )
        raise SynapseError(
            502,
            "Error downloading keys for %s" % (server_name,),
            Codes.UNAUTHORIZED,
        )
    except Exception as e:
        logger.exception(
            "Got Exception when downloading keys for %s: %s %s",
            server_name, type(e).__name__, str(e.message),
        )
        raise SynapseError(
            401,
            "No key for %s with id %s" % (server_name, verify_request.key_ids),
            Codes.UNAUTHORIZED,
        )

    json_object = verify_request.json_object

    logger.debug("Got key %s %s:%s for server %s, verifying" % (
        key_id, verify_key.alg, verify_key.version, server_name,
    ))
    try:
        verify_signed_json(json_object, server_name, verify_key)
    except:
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s" % (
                server_name, verify_key.alg, verify_key.version
            ),
            Codes.UNAUTHORIZED,
        )
47
synapse/events/spamcheck.py
Normal file
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class SpamChecker(object):
    def __init__(self, hs):
        self.spam_checker = None

        module = None
        config = None
        try:
            module, config = hs.config.spam_checker
        except:
            pass

        if module is not None:
            self.spam_checker = module(config=config)

    def check_event_for_spam(self, event):
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
        sent by a local user. If it is sent by a user on another server, then
        users receive a blank event.

        Args:
            event (synapse.events.EventBase): the event to be checked

        Returns:
            bool: True if the event is spammy.
        """
        if self.spam_checker is None:
            return False

        return self.spam_checker.check_event_for_spam(event)
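A toy example of the kind of pluggable module the spam_checker config could point at; only the constructor and check_event_for_spam contract come from the wrapper above, so the class name and options here are invented for illustration:

class ExampleSpamChecker(object):
    # Hypothetical example module; not part of the diff.
    def __init__(self, config):
        # Instantiated by SpamChecker above as module(config=config).
        self.blocked_words = config.get("blocked_words", [])

    @staticmethod
    def parse_config(config):
        # Invoked when the module is loaded from the spam_checker config.
        return config or {}

    def check_event_for_spam(self, event):
        # Returning True causes the event to be rejected (local senders)
        # or blanked out (remote senders), per the docstring above.
        body = event.content.get("body", "")
        return any(word in body for word in self.blocked_words)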
@@ -12,28 +12,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from synapse.events.utils import prune_event

from synapse.crypto.event_signing import check_event_content_hash

from synapse.api.errors import SynapseError

from synapse.util import unwrapFirstError
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

import logging

from synapse.api.errors import SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events.utils import prune_event
from synapse.util import unwrapFirstError, logcontext
from twisted.internet import defer

logger = logging.getLogger(__name__)


class FederationBase(object):
    def __init__(self, hs):
        pass
        self.spam_checker = hs.get_spam_checker()

    @defer.inlineCallbacks
    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
@@ -57,56 +49,52 @@ class FederationBase(object):
        """
        deferreds = self._check_sigs_and_hashes(pdus)

        def callback(pdu):
            return pdu
        @defer.inlineCallbacks
        def handle_check_result(pdu, deferred):
            try:
                res = yield logcontext.make_deferred_yieldable(deferred)
            except SynapseError:
                res = None

        def errback(failure, pdu):
            failure.trap(SynapseError)
            return None

        def try_local_db(res, pdu):
            if not res:
                # Check local db.
                return self.store.get_event(
                res = yield self.store.get_event(
                    pdu.event_id,
                    allow_rejected=True,
                    allow_none=True,
                )
            return res

        def try_remote(res, pdu):
            if not res and pdu.origin != origin:
                return self.get_pdu(
                    destinations=[pdu.origin],
                    event_id=pdu.event_id,
                    outlier=outlier,
                    timeout=10000,
                ).addErrback(lambda e: None)
            return res
                try:
                    res = yield self.get_pdu(
                        destinations=[pdu.origin],
                        event_id=pdu.event_id,
                        outlier=outlier,
                        timeout=10000,
                    )
                except SynapseError:
                    pass

        def warn(res, pdu):
            if not res:
                logger.warn(
                    "Failed to find copy of %s with valid signature",
                    pdu.event_id,
                )
            return res

        for pdu, deferred in zip(pdus, deferreds):
            deferred.addCallbacks(
                callback, errback, errbackArgs=[pdu]
            ).addCallback(
                try_local_db, pdu
            ).addCallback(
                try_remote, pdu
            ).addCallback(
                warn, pdu
            defer.returnValue(res)

        handle = logcontext.preserve_fn(handle_check_result)
        deferreds2 = [
            handle(pdu, deferred)
            for pdu, deferred in zip(pdus, deferreds)
        ]

        valid_pdus = yield logcontext.make_deferred_yieldable(
            defer.gatherResults(
                deferreds2,
                consumeErrors=True,
            )

        valid_pdus = yield preserve_context_over_deferred(defer.gatherResults(
            deferreds,
            consumeErrors=True
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError)

        if include_none:
            defer.returnValue(valid_pdus)
@@ -114,15 +102,24 @@ class FederationBase(object):
        defer.returnValue([p for p in valid_pdus if p])

    def _check_sigs_and_hash(self, pdu):
        return self._check_sigs_and_hashes([pdu])[0]
        return logcontext.make_deferred_yieldable(
            self._check_sigs_and_hashes([pdu])[0],
        )

    def _check_sigs_and_hashes(self, pdus):
        """Throws a SynapseError if a PDU does not have the correct
        signatures.
        """Checks that each of the received events is correctly signed by the
        sending server.

        Args:
            pdus (list[FrozenEvent]): the events to be checked

        Returns:
            FrozenEvent: Either the given event or it redacted if it failed the
            content hash check.
            list[Deferred]: for each input event, a deferred which:
                * returns the original event if the checks pass
                * returns a redacted version of the event (if the signature
                  matched but the hash did not)
                * throws a SynapseError if the signature check failed.
            The deferreds run their callbacks in the sentinel logcontext.
        """

        redacted_pdus = [
@@ -130,26 +127,38 @@ class FederationBase(object):
            for pdu in pdus
        ]

        deferreds = preserve_fn(self.keyring.verify_json_objects_for_server)([
        deferreds = self.keyring.verify_json_objects_for_server([
            (p.origin, p.get_pdu_json())
            for p in redacted_pdus
        ])

        ctx = logcontext.LoggingContext.current_context()

        def callback(_, pdu, redacted):
            if not check_event_content_hash(pdu):
                logger.warn(
                    "Event content has been tampered, redacting %s: %s",
                    pdu.event_id, pdu.get_pdu_json()
                )
                return redacted
            return pdu
            with logcontext.PreserveLoggingContext(ctx):
                if not check_event_content_hash(pdu):
                    logger.warn(
                        "Event content has been tampered, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return redacted

                if self.spam_checker.check_event_for_spam(pdu):
                    logger.warn(
                        "Event contains spam, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return redacted

                return pdu

        def errback(failure, pdu):
            failure.trap(SynapseError)
            logger.warn(
                "Signature check failed for %s",
                pdu.event_id,
            )
            with logcontext.PreserveLoggingContext(ctx):
                logger.warn(
                    "Signature check failed for %s",
                    pdu.event_id,
                )
            return failure

        for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
@@ -22,7 +22,7 @@ from synapse.api.constants import Membership
from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError,
)
from synapse.util import unwrapFirstError
from synapse.util import unwrapFirstError, logcontext
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logutils import log_function
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
@@ -189,10 +189,10 @@ class FederationClient(FederationBase):
        ]

        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = yield preserve_context_over_deferred(defer.gatherResults(
        pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            self._check_sigs_and_hashes(pdus),
            consumeErrors=True,
        )).addErrback(unwrapFirstError)
        ).addErrback(unwrapFirstError))

        defer.returnValue(pdus)

@@ -252,7 +252,7 @@ class FederationClient(FederationBase):
            pdu = pdu_list[0]

            # Check signatures are correct.
            signed_pdu = yield self._check_sigs_and_hashes([pdu])[0]
            signed_pdu = yield self._check_sigs_and_hash(pdu)

            break
@@ -187,6 +187,7 @@ class TransactionQueue(object):
                prev_id for prev_id, _ in event.prev_events
            ],
        )
        destinations = set(destinations)

        if send_on_behalf_of is not None:
            # If we are sending the event on behalf of another server
@@ -153,12 +153,10 @@ class Authenticator(object):
class BaseFederationServlet(object):
    REQUIRE_AUTH = True

    def __init__(self, handler, authenticator, ratelimiter, server_name,
                 room_list_handler):
    def __init__(self, handler, authenticator, ratelimiter, server_name):
        self.handler = handler
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter
        self.room_list_handler = room_list_handler

    def _wrap(self, func):
        authenticator = self.authenticator
@@ -590,7 +588,7 @@ class PublicRoomList(BaseFederationServlet):
        else:
            network_tuple = ThirdPartyInstanceID(None, None)

        data = yield self.room_list_handler.get_local_public_room_list(
        data = yield self.handler.get_local_public_room_list(
            limit, since_token,
            network_tuple=network_tuple
        )
@@ -611,7 +609,7 @@ class FederationVersionServlet(BaseFederationServlet):
    }))


SERVLET_CLASSES = (
FEDERATION_SERVLET_CLASSES = (
    FederationSendServlet,
    FederationPullServlet,
    FederationEventServlet,
@@ -634,17 +632,27 @@ SERVLET_CLASSES = (
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    OpenIdUserInfo,
    PublicRoomList,
    FederationVersionServlet,
)

ROOM_LIST_CLASSES = (
    PublicRoomList,
)


def register_servlets(hs, resource, authenticator, ratelimiter):
    for servletclass in SERVLET_CLASSES:
    for servletclass in FEDERATION_SERVLET_CLASSES:
        servletclass(
            handler=hs.get_replication_layer(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
            room_list_handler=hs.get_room_list_handler(),
        ).register(resource)

    for servletclass in ROOM_LIST_CLASSES:
        servletclass(
            handler=hs.get_room_list_handler(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)
@@ -21,6 +21,7 @@ from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
from synapse.util.async import run_on_reactor
from synapse.util.caches.expiringcache import ExpiringCache

from twisted.web.client import PartialDownloadError

@@ -52,7 +53,15 @@ class AuthHandler(BaseHandler):
            LoginType.DUMMY: self._check_dummy_auth,
        }
        self.bcrypt_rounds = hs.config.bcrypt_rounds
        self.sessions = {}

        # This is not a cache per se, but a store of all current sessions that
        # expire after N hours
        self.sessions = ExpiringCache(
            cache_name="register_sessions",
            clock=hs.get_clock(),
            expiry_ms=self.SESSION_EXPIRE_MS,
            reset_expiry_on_get=True,
        )

        account_handler = _AccountHandler(
            hs, check_user_exists=self.check_user_exists
@@ -617,16 +626,6 @@ class AuthHandler(BaseHandler):
        logger.debug("Saving session %s", session)
        session["last_used"] = self.hs.get_clock().time_msec()
        self.sessions[session["id"]] = session
        self._prune_sessions()

    def _prune_sessions(self):
        for sid, sess in self.sessions.items():
            last_used = 0
            if 'last_used' in sess:
                last_used = sess['last_used']
            now = self.hs.get_clock().time_msec()
            if last_used < now - AuthHandler.SESSION_EXPIRE_MS:
                del self.sessions[sid]

    def hash(self, password):
        """Computes a secure hash of password.
@@ -106,7 +106,7 @@ class DeviceHandler(BaseHandler):
        device_map = yield self.store.get_devices_by_user(user_id)

        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id) for device_id in device_map.keys())
            user_id, device_id=None
        )

        devices = device_map.values()
@@ -133,7 +133,7 @@ class DeviceHandler(BaseHandler):
        except errors.StoreError:
            raise errors.NotFoundError
        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id),)
            user_id, device_id,
        )
        _update_device_from_client_ips(device, ips)
        defer.returnValue(device)
@@ -270,6 +270,8 @@ class DeviceHandler(BaseHandler):
            user_id (str)
            from_token (StreamToken)
        """
        now_token = yield self.hs.get_event_sources().get_current_token()

        room_ids = yield self.store.get_rooms_for_user(user_id)

        # First we check if any devices have changed
@@ -280,11 +282,30 @@ class DeviceHandler(BaseHandler):
        # Then work out if any users have since joined
        rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)

        member_events = yield self.store.get_membership_changes_for_user(
            user_id, from_token.room_key, now_token.room_key
        )
        rooms_changed.update(event.room_id for event in member_events)

        stream_ordering = RoomStreamToken.parse_stream_token(
            from_token.room_key).stream
            from_token.room_key
        ).stream

        possibly_changed = set(changed)
        possibly_left = set()
        for room_id in rooms_changed:
            current_state_ids = yield self.store.get_current_state_ids(room_id)

            # The user may have left the room
            # TODO: Check if they actually did or if we were just invited.
            if room_id not in room_ids:
                for key, event_id in current_state_ids.iteritems():
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue
                    possibly_left.add(state_key)
                continue

            # Fetch the current state at the time.
            try:
                event_ids = yield self.store.get_forward_extremeties_for_room(
@@ -295,8 +316,6 @@ class DeviceHandler(BaseHandler):
                # ordering: treat it the same as a new room
                event_ids = []

            current_state_ids = yield self.store.get_current_state_ids(room_id)

            # special-case for an empty prev state: include all members
            # in the changed list
            if not event_ids:
@@ -307,9 +326,25 @@ class DeviceHandler(BaseHandler):
                    possibly_changed.add(state_key)
                continue

            current_member_id = current_state_ids.get((EventTypes.Member, user_id))
            if not current_member_id:
                continue

            # mapping from event_id -> state_dict
            prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)

            # Check if we've joined the room? If so we just blindly add all the users to
            # the "possibly changed" users.
            for state_dict in prev_state_ids.itervalues():
                member_event = state_dict.get((EventTypes.Member, user_id), None)
                if not member_event or member_event != current_member_id:
                    for key, event_id in current_state_ids.iteritems():
                        etype, state_key = key
                        if etype != EventTypes.Member:
                            continue
                        possibly_changed.add(state_key)
                    break

            # If there has been any change in membership, include them in the
            # possibly changed list. We'll check if they are joined below,
            # and we're not toooo worried about spuriously adding users.
@@ -320,19 +355,30 @@ class DeviceHandler(BaseHandler):

            # check if this member has changed since any of the extremities
            # at the stream_ordering, and add them to the list if so.
            for state_dict in prev_state_ids.values():
            for state_dict in prev_state_ids.itervalues():
                prev_event_id = state_dict.get(key, None)
                if not prev_event_id or prev_event_id != event_id:
                    possibly_changed.add(state_key)
                    if state_key != user_id:
                        possibly_changed.add(state_key)
                    break

        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )
        if possibly_changed or possibly_left:
            users_who_share_room = yield self.store.get_users_who_share_room_with_user(
                user_id
            )

            # Take the intersection of the users whose devices may have changed
            # and those that actually still share a room with the user
            defer.returnValue(users_who_share_room & possibly_changed)
            # Take the intersection of the users whose devices may have changed
            # and those that actually still share a room with the user
            possibly_joined = possibly_changed & users_who_share_room
            possibly_left = (possibly_changed | possibly_left) - users_who_share_room
        else:
            possibly_joined = []
            possibly_left = []

        defer.returnValue({
            "changed": list(possibly_joined),
            "left": list(possibly_left),
        })

    @defer.inlineCallbacks
    def on_federation_query_user_devices(self, user_id):
@@ -43,7 +43,6 @@ from synapse.events.utils import prune_event

from synapse.util.retryutils import NotRetryingDestination

from synapse.push.action_generator import ActionGenerator
from synapse.util.distributor import user_joined_room

from twisted.internet import defer
@@ -75,6 +74,9 @@ class FederationHandler(BaseHandler):
        self.state_handler = hs.get_state_handler()
        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.action_generator = hs.get_action_generator()
        self.is_mine_id = hs.is_mine_id
        self.pusher_pool = hs.get_pusherpool()

        self.replication_layer.set_handler(self)

@@ -832,7 +834,11 @@ class FederationHandler(BaseHandler):

    @defer.inlineCallbacks
    def on_event_auth(self, event_id):
        auth = yield self.store.get_auth_chain([event_id])
        event = yield self.store.get_event(event_id)
        auth = yield self.store.get_auth_chain(
            [auth_id for auth_id, _ in event.auth_events],
            include_given=True
        )

        for event in auth:
            event.signatures.update(
@@ -1047,9 +1053,7 @@ class FederationHandler(BaseHandler):
            yield user_joined_room(self.distributor, user, event.room_id)

        state_ids = context.prev_state_ids.values()
        auth_chain = yield self.store.get_auth_chain(set(
            [event.event_id] + state_ids
        ))
        auth_chain = yield self.store.get_auth_chain(state_ids)

        state = yield self.store.get_events(context.prev_state_ids.values())

@@ -1066,6 +1070,27 @@ class FederationHandler(BaseHandler):
        """
        event = pdu

        is_blocked = yield self.store.is_room_blocked(event.room_id)
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")

        if self.hs.config.block_non_admin_invites:
            raise SynapseError(403, "This server does not accept room invites")

        membership = event.content.get("membership")
        if event.type != EventTypes.Member or membership != Membership.INVITE:
            raise SynapseError(400, "The event was not an m.room.member invite event")

        sender_domain = get_domain_from_id(event.sender)
        if sender_domain != origin:
            raise SynapseError(400, "The invite event was not from the server sending it")

        if event.state_key is None:
            raise SynapseError(400, "The invite event did not have a state key")

        if not self.is_mine_id(event.state_key):
            raise SynapseError(400, "The invite event must be for this server")

        event.internal_metadata.outlier = True
        event.internal_metadata.invite_from_remote = True

@@ -1100,6 +1125,9 @@ class FederationHandler(BaseHandler):
            user_id,
            "leave"
        )
        # Mark as outlier as we don't have any state for this event; we're not
        # even in the room.
        event.internal_metadata.outlier = True
        event = self._sign_event(event)
# Try the host that we succesfully called /make_leave/ on first for
|
||||
@@ -1271,7 +1299,7 @@ class FederationHandler(BaseHandler):
|
||||
for event in res:
|
||||
# We sign these again because there was a bug where we
|
||||
# incorrectly signed things the first time round
|
||||
if self.hs.is_mine_id(event.event_id):
|
||||
if self.is_mine_id(event.event_id):
|
||||
event.signatures.update(
|
||||
compute_event_signature(
|
||||
event,
|
||||
@@ -1344,7 +1372,7 @@ class FederationHandler(BaseHandler):
|
||||
)
|
||||
|
||||
if event:
|
||||
if self.hs.is_mine_id(event.event_id):
|
||||
if self.is_mine_id(event.event_id):
|
||||
# FIXME: This is a temporary work around where we occasionally
|
||||
# return events slightly differently than when they were
|
||||
# originally signed
|
||||
@@ -1388,9 +1416,8 @@ class FederationHandler(BaseHandler):
|
||||
auth_events=auth_events,
|
||||
)
|
||||
|
||||
if not event.internal_metadata.is_outlier():
|
||||
action_generator = ActionGenerator(self.hs)
|
||||
yield action_generator.handle_push_actions_for_event(
|
||||
if not event.internal_metadata.is_outlier() and not backfilled:
|
||||
yield self.action_generator.handle_push_actions_for_event(
|
||||
event, context
|
||||
)
|
||||
|
||||
@@ -1403,7 +1430,7 @@ class FederationHandler(BaseHandler):
|
||||
if not backfilled:
|
||||
# this intentionally does not yield: we don't care about the result
|
||||
# and don't need to wait for it.
|
||||
preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
|
||||
preserve_fn(self.pusher_pool.on_new_notifications)(
|
||||
event_stream_id, max_stream_id
|
||||
)
|
||||
|
||||
@@ -1582,7 +1609,7 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
context.rejected = RejectedReason.AUTH_ERROR
|
||||
|
||||
if event.type == EventTypes.GuestAccess:
|
||||
if event.type == EventTypes.GuestAccess and not context.rejected:
|
||||
yield self.maybe_kick_guest_users(event)
|
||||
|
||||
defer.returnValue(context)
|
||||
@@ -1599,7 +1626,11 @@ class FederationHandler(BaseHandler):
|
||||
pass
|
||||
|
||||
# Now get the current auth_chain for the event.
|
||||
local_auth_chain = yield self.store.get_auth_chain([event_id])
|
||||
event = yield self.store.get_event(event_id)
|
||||
local_auth_chain = yield self.store.get_auth_chain(
|
||||
[auth_id for auth_id, _ in event.auth_events],
|
||||
include_given=True
|
||||
)
|
||||
|
||||
# TODO: Check if we would now reject event_id. If so we need to tell
|
||||
# everyone.
|
||||
@@ -1792,7 +1823,9 @@ class FederationHandler(BaseHandler):
|
||||
auth_ids = yield self.auth.compute_auth_events(
|
||||
event, context.prev_state_ids
|
||||
)
|
||||
local_auth_chain = yield self.store.get_auth_chain(auth_ids)
|
||||
local_auth_chain = yield self.store.get_auth_chain(
|
||||
auth_ids, include_given=True
|
||||
)
|
||||
|
||||
try:
|
||||
# 2. Get remote difference.
|
||||
@@ -2060,6 +2093,14 @@ class FederationHandler(BaseHandler):
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
|
||||
"""Handle an exchange_third_party_invite request from a remote server
|
||||
|
||||
The remote server will call this when it wants to turn a 3pid invite
|
||||
into a normal m.room.member invite.
|
||||
|
||||
Returns:
|
||||
Deferred: resolves (to None)
|
||||
"""
|
||||
builder = self.event_builder_factory.new(event_dict)
|
||||
|
||||
message_handler = self.hs.get_handlers().message_handler
|
||||
@@ -2078,9 +2119,12 @@ class FederationHandler(BaseHandler):
|
||||
raise e
|
||||
yield self._check_signature(event, context)
|
||||
|
||||
# XXX we send the invite here, but send_membership_event also sends it,
|
||||
# so we end up making two requests. I think this is redundant.
|
||||
returned_invite = yield self.send_invite(origin, event)
|
||||
# TODO: Make sure the signatures actually are correct.
|
||||
event.signatures.update(returned_invite.signatures)
|
||||
|
||||
member_handler = self.hs.get_handlers().room_member_handler
|
||||
yield member_handler.send_membership_event(None, event, context)
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
@@ -20,7 +19,6 @@ from synapse.api.errors import AuthError, Codes, SynapseError
|
||||
from synapse.crypto.event_signing import add_hashes_and_signatures
|
||||
from synapse.events.utils import serialize_event
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.push.action_generator import ActionGenerator
|
||||
from synapse.types import (
|
||||
UserID, RoomAlias, RoomStreamToken,
|
||||
)
|
||||
@@ -35,6 +33,7 @@ from canonicaljson import encode_canonical_json
|
||||
|
||||
import logging
|
||||
import random
|
||||
import ujson
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -50,10 +49,16 @@ class MessageHandler(BaseHandler):
|
||||
|
||||
self.pagination_lock = ReadWriteLock()
|
||||
|
||||
self.pusher_pool = hs.get_pusherpool()
|
||||
|
||||
# We arbitrarily limit concurrent event creation for a room to 5.
|
||||
# This is to stop us from diverging history *too* much.
|
||||
self.limiter = Limiter(max_count=5)
|
||||
|
||||
self.action_generator = hs.get_action_generator()
|
||||
|
||||
self.spam_checker = hs.get_spam_checker()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def purge_history(self, room_id, event_id):
|
||||
event = yield self.store.get_event(event_id)
|
||||
@@ -317,6 +322,12 @@ class MessageHandler(BaseHandler):
|
||||
token_id=requester.access_token_id,
|
||||
txn_id=txn_id
|
||||
)
|
||||
|
||||
if self.spam_checker.check_event_for_spam(event):
|
||||
raise SynapseError(
|
||||
403, "Spam is not permitted here", Codes.FORBIDDEN
|
||||
)
|
||||
|
||||
yield self.send_nonmember_event(
|
||||
requester,
|
||||
event,
|
||||
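The check_event_for_spam call added above is a hook for a pluggable spam checker. A minimal sketch of a module that could sit behind it, assuming only the interface visible here (the module name and config shape are hypothetical):

    # my_spam_checker.py (hypothetical module)
    class MySpamChecker(object):
        def __init__(self, config):
            self.blocked_words = config.get("blocked_words", [])

        def check_event_for_spam(self, event):
            # Returning True causes the event to be rejected with a 403.
            body = event.content.get("body", "")
            return any(word in body for word in self.blocked_words)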
@@ -408,6 +419,51 @@ class MessageHandler(BaseHandler):
            [serialize_event(c, now) for c in room_state.values()]
        )

    @defer.inlineCallbacks
    def get_joined_members(self, requester, room_id):
        """Get all the joined members in the room and their profile information.

        Args:
            requester(Requester): The user requesting the joined members.
            room_id(str): The room ID to get the joined members of.
        Returns:
            A dict of user_id to profile info
        """
        user_id = requester.user.to_string()
        if not requester.app_service:
            # We check AS auth after fetching the room membership, as it
            # requires us to pull out all joined members anyway.
            membership, _ = yield self._check_in_room_or_world_readable(
                room_id, user_id
            )
            if membership != Membership.JOIN:
                raise NotImplementedError(
                    "Getting joined members after leaving is not implemented"
                )

        users_with_profile = yield self.state.get_current_user_in_room(room_id)

        # If this is an AS, double check that they are allowed to see the members.
        # This can either be because the AS user is in the room or because there
        # is a user in the room that the AS is "interested in"
        if requester.app_service and user_id not in users_with_profile:
            for uid in users_with_profile:
                if requester.app_service.is_interested_in_user(uid):
                    break
            else:
                # Loop fell through, AS has no interested users in room
                raise AuthError(403, "Appservice not in room")

        defer.returnValue({
            user_id: {
                "avatar_url": profile.avatar_url,
                "display_name": profile.display_name,
            }
            for user_id, profile in users_with_profile.iteritems()
        })

    @measure_func("_create_new_client_event")
    @defer.inlineCallbacks
    def _create_new_client_event(self, builder, requester=None, prev_event_ids=None):
@@ -497,6 +553,14 @@ class MessageHandler(BaseHandler):
            logger.warn("Denying new event %r because %s", event, err)
            raise err

        # Ensure that we can round trip before trying to persist in db
        try:
            dump = ujson.dumps(event.content)
            ujson.loads(dump)
        except:
            logger.exception("Failed to encode content: %r", event.content)
            raise

        yield self.maybe_kick_guest_users(event, context)

        if event.type == EventTypes.CanonicalAlias:
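The round-trip check above rejects event content that cannot survive an encode/decode cycle before it reaches the database. A quick illustration of the failure mode it guards against (illustrative only):

    import ujson

    good = {"body": "hello"}
    ujson.loads(ujson.dumps(good))  # round-trips fine, safe to persist

    bad = {"body": {1, 2, 3}}       # a set is not JSON-serialisable
    try:
        ujson.loads(ujson.dumps(bad))
    except Exception:
        print("rejected before hitting the database")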
@@ -590,8 +654,7 @@ class MessageHandler(BaseHandler):
                "Changing the room create event is forbidden",
            )

        action_generator = ActionGenerator(self.hs)
        yield action_generator.handle_push_actions_for_event(
        yield self.action_generator.handle_push_actions_for_event(
            event, context
        )

@@ -601,7 +664,7 @@ class MessageHandler(BaseHandler):

        # this intentionally does not yield: we don't care about the result
        # and don't need to wait for it.
        preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
        preserve_fn(self.pusher_pool.on_new_notifications)(
            event_stream_id, max_stream_id
        )

@@ -61,7 +61,7 @@ class RoomCreationHandler(BaseHandler):
    }

    @defer.inlineCallbacks
    def create_room(self, requester, config):
    def create_room(self, requester, config, ratelimit=True):
        """ Creates a new room.

        Args:
@@ -75,7 +75,8 @@ class RoomCreationHandler(BaseHandler):
        """
        user_id = requester.user.to_string()

        yield self.ratelimit(requester)
        if ratelimit:
            yield self.ratelimit(requester)

        if "room_alias_name" in config:
            for wchar in string.whitespace:
@@ -167,6 +168,7 @@ class RoomCreationHandler(BaseHandler):
            initial_state=initial_state,
            creation_content=creation_content,
            room_alias=room_alias,
            power_level_content_override=config.get("power_level_content_override", {})
        )

        if "name" in config:
@@ -245,7 +247,8 @@ class RoomCreationHandler(BaseHandler):
            invite_list,
            initial_state,
            creation_content,
            room_alias
            room_alias,
            power_level_content_override,
    ):
        def create(etype, content, **kwargs):
            e = {
@@ -291,7 +294,15 @@ class RoomCreationHandler(BaseHandler):
            ratelimit=False,
        )

        if (EventTypes.PowerLevels, '') not in initial_state:
        # We treat the power levels override specially as this needs to be one
        # of the first events that get sent into a room.
        pl_content = initial_state.pop((EventTypes.PowerLevels, ''), None)
        if pl_content is not None:
            yield send(
                etype=EventTypes.PowerLevels,
                content=pl_content,
            )
        else:
            power_level_content = {
                "users": {
                    creator_id: 100,
@@ -316,6 +327,8 @@ class RoomCreationHandler(BaseHandler):
                for invitee in invite_list:
                    power_level_content["users"][invitee] = 100

            power_level_content.update(power_level_content_override)

            yield send(
                etype=EventTypes.PowerLevels,
                content=power_level_content,

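With the plumbing above, a caller can override individual power-level keys at room creation; the override dict is merged over the generated defaults with dict.update() just before the m.room.power_levels event is sent. A hedged sketch of a create_room config making use of this (field values are illustrative):

    config = {
        "preset": "public_chat",
        "room_alias_name": "example",
        # Only the keys given here change; everything else keeps its default.
        "power_level_content_override": {
            "events_default": 50,  # only moderators may post
            "invite": 50,
        },
    }
    # yield room_creation_handler.create_room(requester, config, ratelimit=False)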
@@ -191,6 +191,8 @@ class RoomMemberHandler(BaseHandler):
        if action in ["kick", "unban"]:
            effective_membership_state = "leave"

        # if this is a join with a 3pid signature, we may need to turn a 3pid
        # invite into a normal invite before we can handle the join.
        if third_party_signed is not None:
            replication = self.hs.get_replication_layer()
            yield replication.exchange_third_party_invite(
@@ -203,6 +205,21 @@ class RoomMemberHandler(BaseHandler):
        if not remote_room_hosts:
            remote_room_hosts = []

        if effective_membership_state not in ("leave", "ban",):
            is_blocked = yield self.store.is_room_blocked(room_id)
            if is_blocked:
                raise SynapseError(403, "This room has been blocked on this server")

        if (effective_membership_state == "invite" and
                self.hs.config.block_non_admin_invites):
            is_requester_admin = yield self.auth.is_server_admin(
                requester.user,
            )
            if not is_requester_admin:
                raise SynapseError(
                    403, "Invites have been disabled on this server",
                )

        latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
        current_state_ids = yield self.state_handler.get_current_state_ids(
            room_id, latest_event_ids=latest_event_ids,
@@ -369,6 +386,11 @@ class RoomMemberHandler(BaseHandler):
            # so don't really fit into the general auth process.
            raise AuthError(403, "Guest access not allowed")

        if event.membership not in (Membership.LEAVE, Membership.BAN):
            is_blocked = yield self.store.is_room_blocked(room_id)
            if is_blocked:
                raise SynapseError(403, "This room has been blocked on this server")

        yield message_handler.handle_new_client_event(
            requester,
            event,
@@ -461,6 +483,16 @@ class RoomMemberHandler(BaseHandler):
        requester,
        txn_id
    ):
        if self.hs.config.block_non_admin_invites:
            is_requester_admin = yield self.auth.is_server_admin(
                requester.user,
            )
            if not is_requester_admin:
                raise SynapseError(
                    403, "Invites have been disabled on this server",
                    Codes.FORBIDDEN,
                )

        invitee = yield self._lookup_3pid(
            id_server, medium, address
        )

@@ -108,6 +108,16 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
        return True


class DeviceLists(collections.namedtuple("DeviceLists", [
    "changed",  # list of user_ids whose devices may have changed
    "left",     # list of user_ids whose devices we no longer track
])):
    __slots__ = []

    def __nonzero__(self):
        return bool(self.changed or self.left)

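__nonzero__ (Python 2's truthiness hook) makes an empty DeviceLists falsy, so the sync code can cheaply test whether there is anything to send down:

    # illustrative only
    assert not DeviceLists(changed=[], left=[])
    assert DeviceLists(changed=["@a:hs"], left=[])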

class SyncResult(collections.namedtuple("SyncResult", [
    "next_batch",  # Token for the next sync
    "presence",  # List of presence events for the user.
@@ -117,6 +127,8 @@ class SyncResult(collections.namedtuple("SyncResult", [
    "archived",  # ArchivedSyncResult for each archived room.
    "to_device",  # List of direct messages for the device.
    "device_lists",  # List of user_ids whose devices have changed
    "device_one_time_keys_count",  # Dict of algorithm to count for one time keys
                                   # for this device
])):
    __slots__ = []

@@ -288,10 +300,20 @@ class SyncHandler(object):

        if recents:
            recents = sync_config.filter_collection.filter_room_timeline(recents)

            # We check if there are any state events; if there are, we pass
            # all current state events to the filter_events function. This is to
            # ensure that we always include current state in the timeline
            current_state_ids = frozenset()
            if any(e.is_state() for e in recents):
                current_state_ids = yield self.state.get_current_state_ids(room_id)
                current_state_ids = frozenset(current_state_ids.itervalues())

            recents = yield filter_events_for_client(
                self.store,
                sync_config.user.to_string(),
                recents,
                always_include_ids=current_state_ids,
            )
        else:
            recents = []
@@ -323,10 +345,20 @@ class SyncHandler(object):
            loaded_recents = sync_config.filter_collection.filter_room_timeline(
                events
            )

            # We check if there are any state events; if there are, we pass
            # all current state events to the filter_events function. This is to
            # ensure that we always include current state in the timeline
            current_state_ids = frozenset()
            if any(e.is_state() for e in loaded_recents):
                current_state_ids = yield self.state.get_current_state_ids(room_id)
                current_state_ids = frozenset(current_state_ids.itervalues())

            loaded_recents = yield filter_events_for_client(
                self.store,
                sync_config.user.to_string(),
                loaded_recents,
                always_include_ids=current_state_ids,
            )
            loaded_recents.extend(recents)
            recents = loaded_recents
@@ -533,7 +565,8 @@ class SyncHandler(object):
        res = yield self._generate_sync_entry_for_rooms(
            sync_result_builder, account_data_by_room
        )
        newly_joined_rooms, newly_joined_users = res
        newly_joined_rooms, newly_joined_users, _, _ = res
        _, _, newly_left_rooms, newly_left_users = res

        block_all_presence_data = (
            since_token is None and
@@ -547,9 +580,21 @@ class SyncHandler(object):
        yield self._generate_sync_entry_for_to_device(sync_result_builder)

        device_lists = yield self._generate_sync_entry_for_device_list(
            sync_result_builder
            sync_result_builder,
            newly_joined_rooms=newly_joined_rooms,
            newly_joined_users=newly_joined_users,
            newly_left_rooms=newly_left_rooms,
            newly_left_users=newly_left_users,
        )

        device_id = sync_config.device_id
        one_time_key_counts = {}
        if device_id:
            user_id = sync_config.user.to_string()
            one_time_key_counts = yield self.store.count_e2e_one_time_keys(
                user_id, device_id
            )

        defer.returnValue(SyncResult(
            presence=sync_result_builder.presence,
            account_data=sync_result_builder.account_data,
@@ -558,30 +603,56 @@ class SyncHandler(object):
            archived=sync_result_builder.archived,
            to_device=sync_result_builder.to_device,
            device_lists=device_lists,
            device_one_time_keys_count=one_time_key_counts,
            next_batch=sync_result_builder.now_token,
        ))

    @measure_func("_generate_sync_entry_for_device_list")
    @defer.inlineCallbacks
    def _generate_sync_entry_for_device_list(self, sync_result_builder):
    def _generate_sync_entry_for_device_list(self, sync_result_builder,
                                             newly_joined_rooms, newly_joined_users,
                                             newly_left_rooms, newly_left_users):
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

        if since_token and since_token.device_list_key:
            room_ids = yield self.store.get_rooms_for_user(user_id)

            user_ids_changed = set()
            changed = yield self.store.get_user_whose_devices_changed(
                since_token.device_list_key
            )
            for other_user_id in changed:
                other_room_ids = yield self.store.get_rooms_for_user(other_user_id)
                if room_ids.intersection(other_room_ids):
                    user_ids_changed.add(other_user_id)

            defer.returnValue(user_ids_changed)
            # TODO: Be more clever than this, i.e. remove users who we already
            # share a room with?
            for room_id in newly_joined_rooms:
                joined_users = yield self.state.get_current_user_in_room(room_id)
                newly_joined_users.update(joined_users)

            for room_id in newly_left_rooms:
                left_users = yield self.state.get_current_user_in_room(room_id)
                newly_left_users.update(left_users)

            # TODO: Check that these users are actually new, i.e. either they
            # weren't in the previous sync *or* they left and rejoined.
            changed.update(newly_joined_users)

            if not changed and not newly_left_users:
                defer.returnValue(DeviceLists(
                    changed=[],
                    left=newly_left_users,
                ))

            users_who_share_room = yield self.store.get_users_who_share_room_with_user(
                user_id
            )

            defer.returnValue(DeviceLists(
                changed=users_who_share_room & changed,
                left=set(newly_left_users) - users_who_share_room,
            ))
        else:
            defer.returnValue([])
            defer.returnValue(DeviceLists(
                changed=[],
                left=[],
            ))

    @defer.inlineCallbacks
    def _generate_sync_entry_for_to_device(self, sync_result_builder):
@@ -745,8 +816,8 @@ class SyncHandler(object):
            account_data_by_room(dict): Dictionary of per room account data

        Returns:
            Deferred(tuple): Returns a 2-tuple of
            `(newly_joined_rooms, newly_joined_users)`
            Deferred(tuple): Returns a 4-tuple of
            `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        block_all_room_ephemeral = (
@@ -777,7 +848,7 @@ class SyncHandler(object):
            )
            if not tags_by_room:
                logger.debug("no-oping sync")
                defer.returnValue(([], []))
                defer.returnValue(([], [], [], []))

        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
            "m.ignored_user_list", user_id=user_id,
@@ -790,7 +861,7 @@ class SyncHandler(object):

        if since_token:
            res = yield self._get_rooms_changed(sync_result_builder, ignored_users)
            room_entries, invited, newly_joined_rooms = res
            room_entries, invited, newly_joined_rooms, newly_left_rooms = res

            tags_by_room = yield self.store.get_updated_tags(
                user_id, since_token.account_data_key,
@@ -798,6 +869,7 @@ class SyncHandler(object):
        else:
            res = yield self._get_all_rooms(sync_result_builder, ignored_users)
            room_entries, invited, newly_joined_rooms = res
            newly_left_rooms = []

            tags_by_room = yield self.store.get_tags_for_user(user_id)

@@ -818,17 +890,30 @@ class SyncHandler(object):

        # Now we want to get any newly joined users
        newly_joined_users = set()
        newly_left_users = set()
        if since_token:
            for joined_sync in sync_result_builder.joined:
                it = itertools.chain(
                    joined_sync.timeline.events, joined_sync.state.values()
                    joined_sync.timeline.events, joined_sync.state.itervalues()
                )
                for event in it:
                    if event.type == EventTypes.Member:
                        if event.membership == Membership.JOIN:
                            newly_joined_users.add(event.state_key)
                        else:
                            prev_content = event.unsigned.get("prev_content", {})
                            prev_membership = prev_content.get("membership", None)
                            if prev_membership == Membership.JOIN:
                                newly_left_users.add(event.state_key)

        defer.returnValue((newly_joined_rooms, newly_joined_users))
        newly_left_users -= newly_joined_users

        defer.returnValue((
            newly_joined_rooms,
            newly_joined_users,
            newly_left_rooms,
            newly_left_users,
        ))

    @defer.inlineCallbacks
    def _have_rooms_changed(self, sync_result_builder):
@@ -898,15 +983,28 @@ class SyncHandler(object):
            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)

        newly_joined_rooms = []
        newly_left_rooms = []
        room_entries = []
        invited = []
        for room_id, events in mem_change_events_by_room_id.items():
        for room_id, events in mem_change_events_by_room_id.iteritems():
            non_joins = [e for e in events if e.membership != Membership.JOIN]
            has_join = len(non_joins) != len(events)

            # We want to figure out if we joined the room at some point since
            # the last sync (even if we have since left). This is to make sure
            # we do send down the room, and with full state, where necessary

            old_state_ids = None
            if room_id in joined_room_ids and non_joins:
                # Always include if the user (re)joined the room, especially
                # important so that device list changes are calculated correctly.
                # If there are non-join member events, but we are still in the room,
                # then the user must have left and joined
                newly_joined_rooms.append(room_id)

                # User is in the room so we don't need to do the invite/leave checks
                continue

            if room_id in joined_room_ids or has_join:
                old_state_ids = yield self.get_state_at(room_id, since_token)
                old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
@@ -918,12 +1016,33 @@ class SyncHandler(object):
                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
                    newly_joined_rooms.append(room_id)

            if room_id in joined_room_ids:
                continue
            # If user is in the room then we don't need to do the invite/leave checks
            if room_id in joined_room_ids:
                continue

            if not non_joins:
                continue

            # Check if we have left the room. This can either be because we were
            # joined before *or* that we since joined and then left.
            if events[-1].membership != Membership.JOIN:
                if has_join:
                    newly_left_rooms.append(room_id)
                else:
                    if not old_state_ids:
                        old_state_ids = yield self.get_state_at(room_id, since_token)
                        old_mem_ev_id = old_state_ids.get(
                            (EventTypes.Member, user_id),
                            None,
                        )
                        old_mem_ev = None
                        if old_mem_ev_id:
                            old_mem_ev = yield self.store.get_event(
                                old_mem_ev_id, allow_none=True
                            )
                    if old_mem_ev and old_mem_ev.membership == Membership.JOIN:
                        newly_left_rooms.append(room_id)

            # Only bother if we're still currently invited
            should_invite = non_joins[-1].membership == Membership.INVITE
            if should_invite:
@@ -1001,7 +1120,7 @@ class SyncHandler(object):
                upto_token=since_token,
            ))

        defer.returnValue((room_entries, invited, newly_joined_rooms))
        defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms))

    @defer.inlineCallbacks
    def _get_all_rooms(self, sync_result_builder, ignored_users):
@@ -1249,6 +1368,7 @@ class SyncResultBuilder(object):
        self.invited = []
        self.archived = []
        self.device = []
        self.to_device = []


class RoomSyncResultBuilder(object):

@@ -89,7 +89,7 @@ class TypingHandler(object):
            until = self._member_typing_until.get(member, None)
            if not until or until <= now:
                logger.info("Timing out typing for: %s", member.user_id)
                preserve_fn(self._stopped_typing)(member)
                self._stopped_typing(member)
                continue

            # Check if we need to resend a keep alive over federation for this
@@ -147,7 +147,7 @@ class TypingHandler(object):
            # No point sending another notification
            defer.returnValue(None)

        yield self._push_update(
        self._push_update(
            member=member,
            typing=True,
        )
@@ -171,7 +171,7 @@ class TypingHandler(object):

        member = RoomMember(room_id=room_id, user_id=target_user_id)

        yield self._stopped_typing(member)
        self._stopped_typing(member)

    @defer.inlineCallbacks
    def user_left_room(self, user, room_id):
@@ -180,7 +180,6 @@ class TypingHandler(object):
            member = RoomMember(room_id=room_id, user_id=user_id)
            yield self._stopped_typing(member)

    @defer.inlineCallbacks
    def _stopped_typing(self, member):
        if member.user_id not in self._room_typing.get(member.room_id, set()):
            # No point
@@ -189,16 +188,15 @@ class TypingHandler(object):
        self._member_typing_until.pop(member, None)
        self._member_last_federation_poke.pop(member, None)

        yield self._push_update(
        self._push_update(
            member=member,
            typing=False,
        )

    @defer.inlineCallbacks
    def _push_update(self, member, typing):
        if self.hs.is_mine_id(member.user_id):
            # Only send updates for changes to our own users.
            yield self._push_remote(member, typing)
            preserve_fn(self._push_remote)(member, typing)

        self._push_update_local(
            member=member,

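A pattern worth noting in the typing changes above: federation pushes are now fired off with preserve_fn rather than yielded, so a slow remote no longer blocks the local update. Roughly, under Twisted inlineCallbacks:

    # before: waits for the remote push to complete
    # yield self._push_remote(member, typing)

    # after: schedule the push (keeping the log context) and carry on
    preserve_fn(self._push_remote)(member, typing)
    self._push_update_local(member=member, typing=typing)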
synapse/handlers/user_directory.py (new file, 641 lines)
@@ -0,0 +1,641 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from twisted.internet import defer

from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.storage.roommember import ProfileInfo
from synapse.util.metrics import Measure
from synapse.util.async import sleep


logger = logging.getLogger(__name__)


class UserDirectoyHandler(object):
    """Handles querying of and keeping updated the user_directory.

    N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY

    The user directory is filled with users who this server can see are joined to a
    world_readable or publicly joinable room. We keep a database table up to date
    by streaming changes of the current state and recalculating whether users should
    be in the directory or not when necessary.

    For each user in the directory we also store a room_id which is public and that the
    user is joined to. This allows us to ignore history_visibility and join_rules changes
    for that user in all other public rooms, as we know they'll still be in at least
    one public room.
    """

    INITIAL_SLEEP_MS = 50
    INITIAL_SLEEP_COUNT = 100
    INITIAL_BATCH_SIZE = 100

    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.update_user_directory = hs.config.update_user_directory

        # When we start up for the first time we need to populate the user_directory.
        # This is a set of user_ids we've already inserted
        self.initially_handled_users = set()
        self.initially_handled_users_in_public = set()

        self.initially_handled_users_share = set()
        self.initially_handled_users_share_private_room = set()

        # The current position in the current_state_delta stream
        self.pos = None

        # Guard to ensure we only process deltas one at a time
        self._is_processing = False

        if self.update_user_directory:
            self.notifier.add_replication_callback(self.notify_new_event)

            # We kick this off so that we don't have to wait for a change before
            # we start populating the user directory
            self.clock.call_later(0, self.notify_new_event)

    def search_users(self, user_id, search_term, limit):
        """Searches for users in directory

        Returns:
            dict of the form::

                {
                    "limited": <bool>,  # whether there were more results or not
                    "results": [  # Ordered by best match first
                        {
                            "user_id": <user_id>,
                            "display_name": <display_name>,
                            "avatar_url": <avatar_url>
                        }
                    ]
                }
        """
        return self.store.search_user_dir(user_id, search_term, limit)

    @defer.inlineCallbacks
    def notify_new_event(self):
        """Called when there may be more deltas to process
        """
        if not self.update_user_directory:
            return

        if self._is_processing:
            return

        self._is_processing = True
        try:
            yield self._unsafe_process()
        finally:
            self._is_processing = False

    @defer.inlineCallbacks
    def _unsafe_process(self):
        # If self.pos is None then it means we haven't fetched it from the DB
        if self.pos is None:
            self.pos = yield self.store.get_user_directory_stream_pos()

        # If still None then we need to do the initial fill of the directory
        if self.pos is None:
            yield self._do_initial_spam()
            self.pos = yield self.store.get_user_directory_stream_pos()

        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "user_dir_delta"):
                deltas = yield self.store.get_current_state_deltas(self.pos)
                if not deltas:
                    return

                logger.info("Handling %d state deltas", len(deltas))
                yield self._handle_deltas(deltas)

                self.pos = deltas[-1]["stream_id"]
                yield self.store.update_user_directory_stream_pos(self.pos)

    @defer.inlineCallbacks
    def _do_initial_spam(self):
        """Populates the user_directory from the current state of the DB, used
        when synapse first starts with user_directory support
        """
        new_pos = yield self.store.get_max_stream_id_in_current_state_deltas()

        # Delete any existing entries just in case there are any
        yield self.store.delete_all_from_user_dir()

        # We process by going through each existing room at a time.
        room_ids = yield self.store.get_all_rooms()

        logger.info("Doing initial update of user directory. %d rooms", len(room_ids))
        num_processed_rooms = 1

        for room_id in room_ids:
            logger.info("Handling room %d/%d", num_processed_rooms, len(room_ids))
            yield self._handle_intial_room(room_id)
            num_processed_rooms += 1
            yield sleep(self.INITIAL_SLEEP_MS / 1000.)

        logger.info("Processed all rooms.")

        self.initially_handled_users = None
        self.initially_handled_users_in_public = None
        self.initially_handled_users_share = None
        self.initially_handled_users_share_private_room = None

        yield self.store.update_user_directory_stream_pos(new_pos)

    @defer.inlineCallbacks
    def _handle_intial_room(self, room_id):
        """Called when we initially fill out user_directory one room at a time
        """
        is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
        if not is_in_room:
            return

        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)

        users_with_profile = yield self.state.get_current_user_in_room(room_id)
        user_ids = set(users_with_profile)
        unhandled_users = user_ids - self.initially_handled_users

        yield self.store.add_profiles_to_user_dir(
            room_id, {
                user_id: users_with_profile[user_id] for user_id in unhandled_users
            }
        )

        self.initially_handled_users |= unhandled_users

        if is_public:
            yield self.store.add_users_to_public_room(
                room_id,
                user_ids=user_ids - self.initially_handled_users_in_public
            )
            self.initially_handled_users_in_public |= user_ids

        # We now go and figure out the new users who share rooms with existing
        # user entries.
        # We sleep aggressively here as otherwise it can starve resources.
        # We also batch up inserts/updates, but try to avoid too many at once.
        to_insert = set()
        to_update = set()
        count = 0
        for user_id in user_ids:
            if count % self.INITIAL_SLEEP_COUNT == 0:
                yield sleep(self.INITIAL_SLEEP_MS / 1000.)

            if not self.is_mine_id(user_id):
                count += 1
                continue

            if self.store.get_if_app_services_interested_in_user(user_id):
                count += 1
                continue

            for other_user_id in user_ids:
                if user_id == other_user_id:
                    continue

                if count % self.INITIAL_SLEEP_COUNT == 0:
                    yield sleep(self.INITIAL_SLEEP_MS / 1000.)
                count += 1

                user_set = (user_id, other_user_id)

                if user_set in self.initially_handled_users_share_private_room:
                    continue

                if user_set in self.initially_handled_users_share:
                    if is_public:
                        continue
                    to_update.add(user_set)
                else:
                    to_insert.add(user_set)

                if is_public:
                    self.initially_handled_users_share.add(user_set)
                else:
                    self.initially_handled_users_share_private_room.add(user_set)

                if len(to_insert) > self.INITIAL_BATCH_SIZE:
                    yield self.store.add_users_who_share_room(
                        room_id, not is_public, to_insert,
                    )
                    to_insert.clear()

                if len(to_update) > self.INITIAL_BATCH_SIZE:
                    yield self.store.update_users_who_share_room(
                        room_id, not is_public, to_update,
                    )
                    to_update.clear()

        if to_insert:
            yield self.store.add_users_who_share_room(
                room_id, not is_public, to_insert,
            )
            to_insert.clear()

        if to_update:
            yield self.store.update_users_who_share_room(
                room_id, not is_public, to_update,
            )
            to_update.clear()

    @defer.inlineCallbacks
    def _handle_deltas(self, deltas):
        """Called with the state deltas to process
        """
        for delta in deltas:
            typ = delta["type"]
            state_key = delta["state_key"]
            room_id = delta["room_id"]
            event_id = delta["event_id"]
            prev_event_id = delta["prev_event_id"]

            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)

            # For join rule and visibility changes we need to check if the room
            # may have become public or not and add/remove the users in said room
            if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
                yield self._handle_room_publicity_change(
                    room_id, prev_event_id, event_id, typ,
                )
            elif typ == EventTypes.Member:
                change = yield self._get_key_change(
                    prev_event_id, event_id,
                    key_name="membership",
                    public_value=Membership.JOIN,
                )

                if change is None:
                    # Handle any profile changes
                    yield self._handle_profile_change(
                        state_key, room_id, prev_event_id, event_id,
                    )
                    continue

                if not change:
                    # Need to check if the server left the room entirely, if so
                    # we might need to remove all the users in that room
                    is_in_room = yield self.store.is_host_joined(
                        room_id, self.server_name,
                    )
                    if not is_in_room:
                        logger.info("Server left room: %r", room_id)
                        # Fetch all the users that we marked as being in the user
                        # directory due to being in the room and then check if we
                        # need to remove those users or not
                        user_ids = yield self.store.get_users_in_dir_due_to_room(room_id)
                        for user_id in user_ids:
                            yield self._handle_remove_user(room_id, user_id)
                        return
                    else:
                        logger.debug("Server is still in room: %r", room_id)

                if change:  # The user joined
                    event = yield self.store.get_event(event_id, allow_none=True)
                    profile = ProfileInfo(
                        avatar_url=event.content.get("avatar_url"),
                        display_name=event.content.get("displayname"),
                    )

                    yield self._handle_new_user(room_id, state_key, profile)
                else:  # The user left
                    yield self._handle_remove_user(room_id, state_key)
            else:
                logger.debug("Ignoring irrelevant type: %r", typ)

    @defer.inlineCallbacks
    def _handle_room_publicity_change(self, room_id, prev_event_id, event_id, typ):
        """Handle a room having potentially changed from/to world_readable/publicly
        joinable.

        Args:
            room_id (str)
            prev_event_id (str|None): The previous event before the state change
            event_id (str|None): The new event after the state change
            typ (str): Type of the event
        """
        logger.debug("Handling change for %s: %s", typ, room_id)

        if typ == EventTypes.RoomHistoryVisibility:
            change = yield self._get_key_change(
                prev_event_id, event_id,
                key_name="history_visibility",
                public_value="world_readable",
            )
        elif typ == EventTypes.JoinRules:
            change = yield self._get_key_change(
                prev_event_id, event_id,
                key_name="join_rule",
                public_value=JoinRules.PUBLIC,
            )
        else:
            raise Exception("Invalid event type")
        # If change is None, no change. True => become world_readable/public,
        # False => was world_readable/public
        if change is None:
            logger.debug("No change")
            return

        # There's been a change to or from being world readable.

        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
            room_id
        )

        logger.debug("Change: %r, is_public: %r", change, is_public)

        if change and not is_public:
            # If we became world readable but room isn't currently public then
            # we ignore the change
            return
        elif not change and is_public:
            # If we stopped being world readable but are still public,
            # ignore the change
            return

        if change:
            users_with_profile = yield self.state.get_current_user_in_room(room_id)
            for user_id, profile in users_with_profile.iteritems():
                yield self._handle_new_user(room_id, user_id, profile)
        else:
            users = yield self.store.get_users_in_public_due_to_room(room_id)
            for user_id in users:
                yield self._handle_remove_user(room_id, user_id)

    @defer.inlineCallbacks
    def _handle_new_user(self, room_id, user_id, profile):
        """Called when we might need to add a user to the directory

        Args:
            room_id (str): room_id that the user joined, or that became public
            user_id (str)
        """
        logger.debug("Adding user to dir, %r", user_id)

        row = yield self.store.get_user_in_directory(user_id)
        if not row:
            yield self.store.add_profiles_to_user_dir(room_id, {user_id: profile})

        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
            room_id
        )

        if is_public:
            row = yield self.store.get_user_in_public_room(user_id)
            if not row:
                yield self.store.add_users_to_public_room(room_id, [user_id])
        else:
            logger.debug("Not adding user to public dir, %r", user_id)

        # Now we update users who share rooms with users. We do this by getting
        # all the current users in the room and seeing which aren't already
        # marked in the database as sharing with `user_id`

        users_with_profile = yield self.state.get_current_user_in_room(room_id)

        to_insert = set()
        to_update = set()

        is_appservice = self.store.get_if_app_services_interested_in_user(user_id)

        # First, if they're our user then we need to update for every user
        if self.is_mine_id(user_id) and not is_appservice:
            # Returns a map of other_user_id -> shared_private. We only need
            # to update mappings for users that either don't already share a
            # room (aren't in the map) or, if the room is private, those that
            # only share a public room.
            user_ids_shared = yield self.store.get_users_who_share_room_from_dir(
                user_id
            )

            for other_user_id in users_with_profile:
                if user_id == other_user_id:
                    continue

                shared_is_private = user_ids_shared.get(other_user_id)
                if shared_is_private is True:
                    # We've already marked in the database they share a private room
                    continue
                elif shared_is_private is False:
                    # They already share a public room, so only update if this is
                    # a private room
                    if not is_public:
                        to_update.add((user_id, other_user_id))
                elif shared_is_private is None:
                    # This is the first time they both share a room
                    to_insert.add((user_id, other_user_id))

        # Next we need to update for every local user in the room
        for other_user_id in users_with_profile:
            if user_id == other_user_id:
                continue

            is_appservice = self.store.get_if_app_services_interested_in_user(
                other_user_id
            )
            if self.is_mine_id(other_user_id) and not is_appservice:
                shared_is_private = yield self.store.get_if_users_share_a_room(
                    other_user_id, user_id,
                )
                if shared_is_private is True:
                    # We've already marked in the database they share a private room
                    continue
                elif shared_is_private is False:
                    # They already share a public room, so only update if this is
                    # a private room
                    if not is_public:
                        to_update.add((other_user_id, user_id))
                elif shared_is_private is None:
                    # This is the first time they both share a room
                    to_insert.add((other_user_id, user_id))

        if to_insert:
            yield self.store.add_users_who_share_room(
                room_id, not is_public, to_insert,
            )

        if to_update:
            yield self.store.update_users_who_share_room(
                room_id, not is_public, to_update,
            )

    @defer.inlineCallbacks
    def _handle_remove_user(self, room_id, user_id):
        """Called when we might need to remove a user from the directory

        Args:
            room_id (str): room_id that the user left, or that stopped being public
            user_id (str)
        """
        logger.debug("Maybe removing user %r", user_id)

        row = yield self.store.get_user_in_directory(user_id)
        update_user_dir = row and row["room_id"] == room_id

        row = yield self.store.get_user_in_public_room(user_id)
        update_user_in_public = row and row["room_id"] == room_id

        if (update_user_in_public or update_user_dir):
            # XXX: Make this faster?
            rooms = yield self.store.get_rooms_for_user(user_id)
            for j_room_id in rooms:
                if (not update_user_in_public and not update_user_dir):
                    break

                is_in_room = yield self.store.is_host_joined(
                    j_room_id, self.server_name,
                )

                if not is_in_room:
                    continue

                if update_user_dir:
                    update_user_dir = False
                    yield self.store.update_user_in_user_dir(user_id, j_room_id)

                is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
                    j_room_id
                )

                if update_user_in_public and is_public:
                    yield self.store.update_user_in_public_user_list(user_id, j_room_id)
                    update_user_in_public = False

        if update_user_dir:
            yield self.store.remove_from_user_dir(user_id)
        elif update_user_in_public:
            yield self.store.remove_from_user_in_public_room(user_id)

        # Now handle users_who_share_rooms.

        # Get a list of user tuples that were in the DB due to this room and
        # users (this includes tuples where the other user matches `user_id`)
        user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
            user_id, room_id,
        )

        for user_id, other_user_id in user_tuples:
            # For each user tuple get a list of rooms that they still share,
            # trying to find a private room, and update the entry in the DB
            rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)

            # If they don't share a room anymore, remove the mapping
            if not rooms:
                yield self.store.remove_user_who_share_room(
                    user_id, other_user_id,
                )
                continue

            found_public_share = None
            for j_room_id in rooms:
                is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
                    j_room_id
                )

                if is_public:
                    found_public_share = j_room_id
                else:
                    found_public_share = None
                    yield self.store.update_users_who_share_room(
                        room_id, not is_public, [(user_id, other_user_id)],
                    )
                    break

            if found_public_share:
                yield self.store.update_users_who_share_room(
                    room_id, not is_public, [(user_id, other_user_id)],
                )

    @defer.inlineCallbacks
    def _handle_profile_change(self, user_id, room_id, prev_event_id, event_id):
        """Check member event changes for any profile changes and update the
        database if there are.
        """
        if not prev_event_id or not event_id:
            return

        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
        event = yield self.store.get_event(event_id, allow_none=True)

        if not prev_event or not event:
            return

        if event.membership != Membership.JOIN:
            return

        prev_name = prev_event.content.get("displayname")
        new_name = event.content.get("displayname")

        prev_avatar = prev_event.content.get("avatar_url")
        new_avatar = event.content.get("avatar_url")

        if prev_name != new_name or prev_avatar != new_avatar:
            yield self.store.update_profile_in_user_dir(
                user_id, new_name, new_avatar, room_id,
            )

    @defer.inlineCallbacks
    def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
        """Given two events check if the `key_name` field in content changed
        from not matching `public_value` to doing so.

        For example, check if `history_visibility` (`key_name`) changed from
        `shared` to `world_readable` (`public_value`).

        Returns:
            None if the field in the events either both match `public_value`
            or if neither do, i.e. there has been no change.
            True if it didn't match `public_value` but now does
            False if it did match `public_value` but now doesn't
        """
        prev_event = None
        event = None
        if prev_event_id:
            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)

        if event_id:
            event = yield self.store.get_event(event_id, allow_none=True)

        if not event and not prev_event:
            logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
            defer.returnValue(None)

        prev_value = None
        value = None

        if prev_event:
            prev_value = prev_event.content.get(key_name)

        if event:
            value = event.content.get(key_name)

        logger.debug("prev_value: %r -> value: %r", prev_value, value)

        if value == public_value and prev_value != public_value:
            defer.returnValue(True)
        elif value != public_value and prev_value == public_value:
            defer.returnValue(False)
        else:
            defer.returnValue(None)
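The tri-state return of _get_key_change drives the delta handling above. A compact illustration for the history_visibility case (the values are hypothetical):

    # prev_value           value              result
    # "shared"          -> "world_readable"   True   (became public)
    # "world_readable"  -> "shared"           False  (stopped being public)
    # "shared"          -> "invited"          None   (no change w.r.t. public_value)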
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket

from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet import defer, reactor
@@ -30,7 +31,10 @@ logger = logging.getLogger(__name__)

SERVER_CACHE = {}


# our record of an individual server which can be tried to reach a destination.
#
# "host" is actually a dotted-quad or ipv6 address string. Except when there's
# no SRV record, in which case it is the original hostname.
_Server = collections.namedtuple(
    "_Server", "priority weight host port expires"
)
@@ -219,9 +223,10 @@ class SRVClientEndpoint(object):
                return self.default_server
            else:
                raise ConnectError(
                    "Not server available for %s" % self.service_name
                    "No server available for %s" % self.service_name
                )

        # look for all servers with the same priority
        min_priority = self.servers[0].priority
        weight_indexes = list(
            (index, server.weight + 1)
@@ -231,11 +236,22 @@ class SRVClientEndpoint(object):

        total_weight = sum(weight for index, weight in weight_indexes)
        target_weight = random.randint(0, total_weight)

        for index, weight in weight_indexes:
            target_weight -= weight
            if target_weight <= 0:
                server = self.servers[index]
                # XXX: this looks totally dubious:
                #
                # (a) we never reuse a server until we have been through
                #     all of the servers at the same priority, so if the
                #     weights are A: 100, B:1, we always do ABABAB instead of
                #     AAAA...AAAB (approximately).
                #
                # (b) After using all the servers at the lowest priority,
                #     we move onto the next priority. We should only use the
                #     second priority if servers at the top priority are
                #     unreachable.
                #
                del self.servers[index]
                self.used_servers.append(server)
                return server

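For contrast with the XXX note above: RFC 2782 intends the weighted draw to be re-run over the full candidate list on every pick, so a weight-100 server is chosen roughly 100 times as often as a weight-1 one. A rough sketch of that behaviour, not the endpoint's actual algorithm:

    import random

    def pick_server(servers):
        # servers: objects with .priority and .weight attributes
        lowest = min(s.priority for s in servers)
        candidates = [s for s in servers if s.priority == lowest]
        total = sum(s.weight + 1 for s in candidates)
        target = random.randint(0, total)
        for s in candidates:
            target -= s.weight + 1
            if target <= 0:
                return s
        return candidates[-1]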
@@ -280,26 +296,21 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t
|
||||
continue
|
||||
|
||||
payload = answer.payload
|
||||
host = str(payload.target)
|
||||
srv_ttl = answer.ttl
|
||||
|
||||
try:
|
||||
answers, _, _ = yield dns_client.lookupAddress(host)
|
||||
except DNSNameError:
|
||||
continue
|
||||
hosts = yield _get_hosts_for_srv_record(
|
||||
dns_client, str(payload.target)
|
||||
)
|
||||
|
||||
for answer in answers:
|
||||
if answer.type == dns.A and answer.payload:
|
||||
ip = answer.payload.dottedQuad()
|
||||
host_ttl = min(srv_ttl, answer.ttl)
|
||||
for (ip, ttl) in hosts:
|
||||
host_ttl = min(answer.ttl, ttl)
|
||||
|
||||
servers.append(_Server(
|
||||
host=ip,
|
||||
port=int(payload.port),
|
||||
priority=int(payload.priority),
|
||||
weight=int(payload.weight),
|
||||
expires=int(clock.time()) + host_ttl,
|
||||
))
|
||||
servers.append(_Server(
|
||||
host=ip,
|
||||
port=int(payload.port),
|
||||
priority=int(payload.priority),
|
||||
weight=int(payload.weight),
|
||||
expires=int(clock.time()) + host_ttl,
|
||||
))
|
||||
|
||||
servers.sort()
|
||||
cache[service_name] = list(servers)
|
||||
@@ -317,3 +328,80 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t
raise e

defer.returnValue(servers)


@defer.inlineCallbacks
def _get_hosts_for_srv_record(dns_client, host):
"""Look up each of the hosts in a SRV record

Args:
dns_client (twisted.names.dns.IResolver):
host (basestring): host to look up

Returns:
Deferred[list[(str, int)]]: a list of (host, ttl) pairs

"""
ip4_servers = []
ip6_servers = []

def cb(res):
# lookupAddress and lookupIP6Address return a three-tuple
# giving the answer, authority, and additional sections of the
# response.
#
# we only care about the answers.

return res[0]

def eb(res, record_type):
if res.check(DNSNameError):
return []
logger.warn("Error looking up %s for %s: %s %s",
record_type, host, res, res.value)
return res

# no logcontexts here, so we can safely fire these off and gatherResults
# (errbackArgs passes the record type through to eb)
d1 = dns_client.lookupAddress(host).addCallbacks(cb, eb, errbackArgs=("A",))
d2 = dns_client.lookupIPV6Address(host).addCallbacks(cb, eb, errbackArgs=("AAAA",))
results = yield defer.DeferredList(
[d1, d2], consumeErrors=True)

# if all of the lookups failed, raise an exception rather than blowing out
# the cache with an empty result.
if results and all(s == defer.FAILURE for (s, _) in results):
defer.returnValue(results[0][1])

for (success, result) in results:
if success == defer.FAILURE:
continue

for answer in result:
if not answer.payload:
continue

try:
if answer.type == dns.A:
ip = answer.payload.dottedQuad()
ip4_servers.append((ip, answer.ttl))
elif answer.type == dns.AAAA:
ip = socket.inet_ntop(
socket.AF_INET6, answer.payload.address,
)
ip6_servers.append((ip, answer.ttl))
else:
# the most likely candidate here is a CNAME record.
# rfc2782 says srvs may not point to aliases.
logger.warn(
"Ignoring unexpected DNS record type %s for %s",
answer.type, host,
)
continue
except Exception as e:
logger.warn("Ignoring invalid DNS response for %s: %s",
host, e)
continue

# keep the ipv4 results before the ipv6 results, mostly to match historical
# behaviour.
defer.returnValue(ip4_servers + ip6_servers)

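The `DeferredList` results consumed above are `(success, value)` tuples, with `defer.SUCCESS`/`defer.FAILURE` as the flag constants; a failed entry carries a `Failure`. A minimal self-contained illustration:

```python
from twisted.internet import defer

d1 = defer.succeed([("1.2.3.4", 300)])
d2 = defer.fail(RuntimeError("no AAAA record"))

dl = defer.DeferredList([d1, d2], consumeErrors=True)

def show(results):
    for success, value in results:
        if success == defer.SUCCESS:
            print("ok: %r" % (value,))
        else:
            print("failed: %s" % value.getErrorMessage())  # value is a Failure

dl.addCallback(show)  # fires synchronously: both deferreds already resolved
```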
@@ -204,18 +204,15 @@ class MatrixFederationHttpClient(object):
raise

logger.warn(
"{%s} Sending request failed to %s: %s %s: %s - %s",
"{%s} Sending request failed to %s: %s %s: %s",
txn_id,
destination,
method,
url_bytes,
type(e).__name__,
_flatten_response_never_received(e),
)

log_result = "%s - %s" % (
type(e).__name__, _flatten_response_never_received(e),
)
log_result = _flatten_response_never_received(e)

if retries_left and not timeout:
if long_retries:
@@ -578,12 +575,14 @@ class _JsonProducer(object):

def _flatten_response_never_received(e):
if hasattr(e, "reasons"):
return ", ".join(
reasons = ", ".join(
_flatten_response_never_received(f.value)
for f in e.reasons
)

return "%s:[%s]" % (type(e).__name__, reasons)
else:
return "%s: %s" % (type(e).__name__, e.message,)
return repr(e)


def check_content_type_is_json(headers):

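The recursion in `_flatten_response_never_received` bottoms out at `repr(e)` and wraps aggregate failures in `Type:[...]`. The same idea on plain exceptions, as a sketch (`AggregateError` is a made-up stand-in for a Twisted failure with `.reasons`; the real code recurses on each reason's `.value`):

```python
def flatten_error(e):
    reasons = getattr(e, "reasons", None)
    if reasons:
        inner = ", ".join(flatten_error(f) for f in reasons)
        return "%s:[%s]" % (type(e).__name__, inner)
    return repr(e)

class AggregateError(Exception):
    def __init__(self, reasons):
        self.reasons = reasons

print(flatten_error(
    AggregateError([ValueError("bad host"), AggregateError([IOError("timed out")])])
))
# e.g. AggregateError:[ValueError('bad host'), AggregateError:[OSError('timed out')]]
```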
@@ -412,7 +412,7 @@ def set_cors_headers(request):
)
request.setHeader(
"Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept"
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
)

@@ -20,7 +20,7 @@ import logging
import re
import time

ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
ACCESS_TOKEN_RE = re.compile(r'(\?\S*?access(_|%5[Ff])token=).*?(&|\s|$)')


class SynapseRequest(Request):

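To see what the tightened pattern does, here is a minimal sketch (the `redact_uri` helper name is invented for illustration; the replacement template keeps the captured prefix and the trailing delimiter):

```python
import re

ACCESS_TOKEN_RE = re.compile(r'(\?\S*?access(_|%5[Ff])token=).*?(&|\s|$)')

def redact_uri(uri):
    return ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri)

print(redact_uri("/_matrix/client/r0/sync?access_token=secret&since=s72"))
# -> /_matrix/client/r0/sync?access_token=<redacted>&since=s72
```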
@@ -251,7 +251,8 @@ class Notifier(object):
|
||||
"""Notify any user streams that are interested in this room event"""
|
||||
# poke any interested application service.
|
||||
preserve_fn(self.appservice_handler.notify_interested_services)(
|
||||
room_stream_id)
|
||||
room_stream_id
|
||||
)
|
||||
|
||||
if self.federation_sender:
|
||||
preserve_fn(self.federation_sender.notify_new_events)(
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from .bulk_push_rule_evaluator import evaluator_for_event
|
||||
from .bulk_push_rule_evaluator import BulkPushRuleEvaluator
|
||||
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
@@ -24,11 +24,12 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ActionGenerator:
|
||||
class ActionGenerator(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
self.bulk_evaluator = BulkPushRuleEvaluator(hs)
|
||||
# really we want to get all user ids and all profile tags too,
|
||||
# since we want the actions for each profile tag for every user and
|
||||
# also actions for a client with no profile tag for each user.
|
||||
@@ -38,16 +39,11 @@ class ActionGenerator:
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_push_actions_for_event(self, event, context):
|
||||
with Measure(self.clock, "evaluator_for_event"):
|
||||
bulk_evaluator = yield evaluator_for_event(
|
||||
event, self.hs, self.store, context
|
||||
)
|
||||
|
||||
with Measure(self.clock, "action_for_event_by_user"):
|
||||
actions_by_user = yield bulk_evaluator.action_for_event_by_user(
|
||||
actions_by_user = yield self.bulk_evaluator.action_for_event_by_user(
|
||||
event, context
|
||||
)
|
||||
|
||||
context.push_actions = [
|
||||
(uid, actions) for uid, actions in actions_by_user.items()
|
||||
(uid, actions) for uid, actions in actions_by_user.iteritems()
|
||||
]
|
||||
|
||||
@@ -19,65 +19,106 @@ from twisted.internet import defer
|
||||
|
||||
from .push_rule_evaluator import PushRuleEvaluatorForEvent
|
||||
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.visibility import filter_events_for_clients_context
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.metrics import get_metrics_for
|
||||
from synapse.util.caches import metrics as cache_metrics
|
||||
from synapse.util.caches.descriptors import cached
|
||||
from synapse.util.async import Linearizer
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def evaluator_for_event(event, hs, store, context):
|
||||
rules_by_user = yield store.bulk_get_push_rules_for_room(
|
||||
event, context
|
||||
)
|
||||
rules_by_room = {}
|
||||
|
||||
# if this event is an invite event, we may need to run rules for the user
|
||||
# who's been invited, otherwise they won't get told they've been invited
|
||||
if event.type == 'm.room.member' and event.content['membership'] == 'invite':
|
||||
invited_user = event.state_key
|
||||
if invited_user and hs.is_mine_id(invited_user):
|
||||
has_pusher = yield store.user_has_pusher(invited_user)
|
||||
if has_pusher:
|
||||
rules_by_user = dict(rules_by_user)
|
||||
rules_by_user[invited_user] = yield store.get_push_rules_for_user(
|
||||
invited_user
|
||||
)
|
||||
push_metrics = get_metrics_for(__name__)
|
||||
|
||||
defer.returnValue(BulkPushRuleEvaluator(
|
||||
event.room_id, rules_by_user, store
|
||||
))
|
||||
push_rules_invalidation_counter = push_metrics.register_counter(
|
||||
"push_rules_invalidation_counter"
|
||||
)
|
||||
push_rules_state_size_counter = push_metrics.register_counter(
|
||||
"push_rules_state_size_counter"
|
||||
)
|
||||
|
||||
# Measures whether we use the fast path of using state deltas, or if we have to
|
||||
# recalculate from scratch
|
||||
push_rules_delta_state_cache_metric = cache_metrics.register_cache(
|
||||
"cache",
|
||||
size_callback=lambda: 0, # Meaningless size, as this isn't a cache that stores values
|
||||
cache_name="push_rules_delta_state_cache_metric",
|
||||
)
|
||||
|
||||
|
||||
class BulkPushRuleEvaluator:
|
||||
class BulkPushRuleEvaluator(object):
|
||||
"""Calculates the outcome of push rules for an event for all users in the
|
||||
room at once.
|
||||
"""
|
||||
Runs push rules for all users in a room.
|
||||
This is faster than running PushRuleEvaluator for each user because it
|
||||
fetches all the rules for all the users in one (batched) db query
|
||||
rather than doing multiple queries per-user. It currently uses
|
||||
the same logic to run the actual rules, but could be optimised further
|
||||
(see https://matrix.org/jira/browse/SYN-562)
|
||||
"""
|
||||
def __init__(self, room_id, rules_by_user, store):
|
||||
self.room_id = room_id
|
||||
self.rules_by_user = rules_by_user
|
||||
self.store = store
|
||||
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
self.room_push_rule_cache_metrics = cache_metrics.register_cache(
|
||||
"cache",
|
||||
size_callback=lambda: 0, # There's no good value for this
|
||||
cache_name="room_push_rule_cache",
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_rules_for_event(self, event, context):
|
||||
"""This gets the rules for all users in the room at the time of the event,
|
||||
as well as the push rules for the invitee if the event is an invite.
|
||||
|
||||
Returns:
|
||||
dict of user_id -> push_rules
|
||||
"""
|
||||
room_id = event.room_id
|
||||
rules_for_room = self._get_rules_for_room(room_id)
|
||||
|
||||
rules_by_user = yield rules_for_room.get_rules(event, context)
|
||||
|
||||
# if this event is an invite event, we may need to run rules for the user
|
||||
# who's been invited, otherwise they won't get told they've been invited
|
||||
if event.type == 'm.room.member' and event.content['membership'] == 'invite':
|
||||
invited = event.state_key
|
||||
if invited and self.hs.is_mine_id(invited):
|
||||
has_pusher = yield self.store.user_has_pusher(invited)
|
||||
if has_pusher:
|
||||
rules_by_user = dict(rules_by_user)
|
||||
rules_by_user[invited] = yield self.store.get_push_rules_for_user(
|
||||
invited
|
||||
)
|
||||
|
||||
defer.returnValue(rules_by_user)
|
||||
|
||||
@cached()
|
||||
def _get_rules_for_room(self, room_id):
|
||||
"""Get the current RulesForRoom object for the given room id
|
||||
|
||||
Returns:
|
||||
RulesForRoom
|
||||
"""
|
||||
# It's important that RulesForRoom gets added to self._get_rules_for_room.cache
|
||||
# before any lookup methods get called on it as otherwise there may be
|
||||
# a race if invalidate_all gets called (which assumes it's in the cache)
|
||||
return RulesForRoom(
|
||||
self.hs, room_id, self._get_rules_for_room.cache,
|
||||
self.room_push_rule_cache_metrics,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def action_for_event_by_user(self, event, context):
|
||||
"""Given an event and context, evaluate the push rules and return
|
||||
the results
|
||||
|
||||
Returns:
|
||||
dict of user_id -> action
|
||||
"""
|
||||
rules_by_user = yield self._get_rules_for_event(event, context)
|
||||
actions_by_user = {}
|
||||
|
||||
# None of these users can be peeking since this list of users comes
|
||||
# from the set of users in the room, so we know for sure they're all
|
||||
# actually in the room.
|
||||
user_tuples = [
|
||||
(u, False) for u in self.rules_by_user.keys()
|
||||
]
|
||||
|
||||
filtered_by_user = yield filter_events_for_clients_context(
|
||||
self.store, user_tuples, [event], {event.event_id: context}
|
||||
)
|
||||
|
||||
room_members = yield self.store.get_joined_users_from_context(
|
||||
event, context
|
||||
)
|
||||
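The ordering constraint in the comment above generalises: when a cached factory hands out stateful per-key objects, the entry must be in the cache before the object does anything that could trigger invalidation. A minimal sketch, with a plain dict standing in for the `@cached` descriptor's cache:

```python
class PerRoomRules(object):  # illustrative stand-in for RulesForRoom
    def __init__(self, room_id):
        self.room_id = room_id
        self.rules_by_user = {}

class Evaluator(object):
    def __init__(self):
        self._rules_for_room = {}  # room_id -> PerRoomRules

    def get_rules_for_room(self, room_id):
        rules = self._rules_for_room.get(room_id)
        if rules is None:
            rules = PerRoomRules(room_id)
            # insert *before* any lookups run on the object, so a racing
            # invalidation can find (and evict) the entry it expects
            self._rules_for_room[room_id] = rules
        return rules
```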
@@ -86,7 +127,15 @@ class BulkPushRuleEvaluator:
|
||||
|
||||
condition_cache = {}
|
||||
|
||||
for uid, rules in self.rules_by_user.items():
|
||||
for uid, rules in rules_by_user.iteritems():
|
||||
if event.sender == uid:
|
||||
continue
|
||||
|
||||
if not event.is_state():
|
||||
is_ignored = yield self.store.is_ignored_by(event.sender, uid)
|
||||
if is_ignored:
|
||||
continue
|
||||
|
||||
display_name = None
|
||||
profile_info = room_members.get(uid)
|
||||
if profile_info:
|
||||
@@ -98,13 +147,6 @@ class BulkPushRuleEvaluator:
|
||||
if event.type == EventTypes.Member and event.state_key == uid:
|
||||
display_name = event.content.get("displayname", None)
|
||||
|
||||
filtered = filtered_by_user[uid]
|
||||
if len(filtered) == 0:
|
||||
continue
|
||||
|
||||
if filtered[0].sender == uid:
|
||||
continue
|
||||
|
||||
for rule in rules:
|
||||
if 'enabled' in rule and not rule['enabled']:
|
||||
continue
|
||||
@@ -138,3 +180,264 @@ def _condition_checker(evaluator, conditions, uid, display_name, cache):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class RulesForRoom(object):
|
||||
"""Caches push rules for users in a room.
|
||||
|
||||
This efficiently handles users joining/leaving the room by not invalidating
|
||||
the entire cache for the room.
|
||||
"""
|
||||
|
||||
def __init__(self, hs, room_id, rules_for_room_cache, room_push_rule_cache_metrics):
|
||||
"""
|
||||
Args:
|
||||
hs (HomeServer)
|
||||
room_id (str)
|
||||
rules_for_room_cache(Cache): The cache object that caches these
|
||||
RoomsForUser objects.
|
||||
room_push_rule_cache_metrics (CacheMetric)
|
||||
"""
|
||||
self.room_id = room_id
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.store = hs.get_datastore()
|
||||
self.room_push_rule_cache_metrics = room_push_rule_cache_metrics
|
||||
|
||||
self.linearizer = Linearizer(name="rules_for_room")
|
||||
|
||||
self.member_map = {} # event_id -> (user_id, state)
|
||||
self.rules_by_user = {} # user_id -> rules
|
||||
|
||||
# The last state group we updated the caches for. If the state_group of
|
||||
# a new event comes along, we know that we can just return the cached
|
||||
# result.
|
||||
# On invalidation of the rules themselves (if the user changes them),
|
||||
# we invalidate everything and set state_group to `object()`
|
||||
self.state_group = object()
|
||||
|
||||
# A sequence number to keep track of when we're allowed to update the
|
||||
# cache. We bump the sequence number when we invalidate the cache. If
|
||||
# the sequence number changes while we're calculating stuff we should
|
||||
# not update the cache with it.
|
||||
self.sequence = 0
|
||||
|
||||
# A cache of user_ids that we *know* aren't interesting, e.g. user_ids
|
||||
# owned by AS's, or remote users, etc. (I.e. users we will never need to
|
||||
# calculate push for)
|
||||
# These never need to be invalidated as we will never set up push for
|
||||
# them.
|
||||
self.uninteresting_user_set = set()
|
||||
|
||||
# We need to be clever on the invalidating caches callbacks, as
|
||||
# otherwise the invalidation callback holds a reference to the object,
|
||||
# potentially causing it to leak.
|
||||
# To get around this we pass a function that on invalidations looks ups
|
||||
# the RoomsForUser entry in the cache, rather than keeping a reference
|
||||
# to self around in the callback.
|
||||
self.invalidate_all_cb = _Invalidation(rules_for_room_cache, room_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_rules(self, event, context):
|
||||
"""Given an event context return the rules for all users who are
|
||||
currently in the room.
|
||||
"""
|
||||
state_group = context.state_group
|
||||
|
||||
if state_group and self.state_group == state_group:
|
||||
logger.debug("Using cached rules for %r", self.room_id)
|
||||
self.room_push_rule_cache_metrics.inc_hits()
|
||||
defer.returnValue(self.rules_by_user)
|
||||
|
||||
with (yield self.linearizer.queue(())):
|
||||
if state_group and self.state_group == state_group:
|
||||
logger.debug("Using cached rules for %r", self.room_id)
|
||||
self.room_push_rule_cache_metrics.inc_hits()
|
||||
defer.returnValue(self.rules_by_user)
|
||||
|
||||
self.room_push_rule_cache_metrics.inc_misses()
|
||||
|
||||
ret_rules_by_user = {}
|
||||
missing_member_event_ids = {}
|
||||
if state_group and self.state_group == context.prev_group:
|
||||
# If we have a simple delta then we can reuse most of the previous
|
||||
# results.
|
||||
ret_rules_by_user = self.rules_by_user
|
||||
current_state_ids = context.delta_ids
|
||||
|
||||
push_rules_delta_state_cache_metric.inc_hits()
|
||||
else:
|
||||
current_state_ids = context.current_state_ids
|
||||
push_rules_delta_state_cache_metric.inc_misses()
|
||||
|
||||
push_rules_state_size_counter.inc_by(len(current_state_ids))
|
||||
|
||||
logger.debug(
|
||||
"Looking for member changes in %r %r", state_group, current_state_ids
|
||||
)
|
||||
|
||||
# Loop through to see which member events we've seen and have rules
|
||||
# for and which we need to fetch
|
||||
for key in current_state_ids:
|
||||
typ, user_id = key
|
||||
if typ != EventTypes.Member:
|
||||
continue
|
||||
|
||||
if user_id in self.uninteresting_user_set:
|
||||
continue
|
||||
|
||||
if not self.is_mine_id(user_id):
|
||||
self.uninteresting_user_set.add(user_id)
|
||||
continue
|
||||
|
||||
if self.store.get_if_app_services_interested_in_user(user_id):
|
||||
self.uninteresting_user_set.add(user_id)
|
||||
continue
|
||||
|
||||
event_id = current_state_ids[key]
|
||||
|
||||
res = self.member_map.get(event_id, None)
|
||||
if res:
|
||||
user_id, state = res
|
||||
if state == Membership.JOIN:
|
||||
rules = self.rules_by_user.get(user_id, None)
|
||||
if rules:
|
||||
ret_rules_by_user[user_id] = rules
|
||||
continue
|
||||
|
||||
# If a user has left a room we remove their push rule. If they
|
||||
# joined then we re-add it later in _update_rules_with_member_event_ids
|
||||
ret_rules_by_user.pop(user_id, None)
|
||||
missing_member_event_ids[user_id] = event_id
|
||||
|
||||
if missing_member_event_ids:
|
||||
# If we have some member events we haven't seen, look them up
|
||||
# and fetch push rules for them if appropriate.
|
||||
logger.debug("Found new member events %r", missing_member_event_ids)
|
||||
yield self._update_rules_with_member_event_ids(
|
||||
ret_rules_by_user, missing_member_event_ids, state_group, event
|
||||
)
|
||||
else:
|
||||
# The push rules didn't change but lets update the cache anyway
|
||||
self.update_cache(
|
||||
self.sequence,
|
||||
members={}, # There were no membership changes
|
||||
rules_by_user=ret_rules_by_user,
|
||||
state_group=state_group
|
||||
)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
"Returning push rules for %r %r",
|
||||
self.room_id, ret_rules_by_user.keys(),
|
||||
)
|
||||
defer.returnValue(ret_rules_by_user)
|
||||
|
||||
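`get_rules` above is the classic check / queue / re-check pattern: test the cache, wait on the per-room `Linearizer`, then test again, because another caller may have populated the cache while we queued. Distilled into a sketch (reusing the codebase's `Linearizer`; `compute` is assumed to return a Deferred):

```python
from twisted.internet import defer
from synapse.util.async import Linearizer

class CachedLoader(object):
    def __init__(self):
        self.cache = {}
        self.linearizer = Linearizer(name="cached_loader")

    @defer.inlineCallbacks
    def get(self, key, compute):
        if key in self.cache:
            defer.returnValue(self.cache[key])
        with (yield self.linearizer.queue(key)):
            if key in self.cache:  # re-check: someone may have beaten us to it
                defer.returnValue(self.cache[key])
            value = yield compute()
            self.cache[key] = value
        defer.returnValue(value)
```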
@defer.inlineCallbacks
|
||||
def _update_rules_with_member_event_ids(self, ret_rules_by_user, member_event_ids,
|
||||
state_group, event):
|
||||
"""Update the partially filled rules_by_user dict by fetching rules for
|
||||
any newly joined users in the `member_event_ids` list.
|
||||
|
||||
Args:
|
||||
ret_rules_by_user (dict): Partially filled dict of push rules. Gets
|
||||
updated with any new rules.
|
||||
member_event_ids (list): List of event ids for membership events that
|
||||
have happened since the last time we filled rules_by_user
|
||||
state_group: The state group we are currently computing push rules
|
||||
for. Used when updating the cache.
|
||||
"""
|
||||
sequence = self.sequence
|
||||
|
||||
rows = yield self.store._simple_select_many_batch(
|
||||
table="room_memberships",
|
||||
column="event_id",
|
||||
iterable=member_event_ids.values(),
|
||||
retcols=('user_id', 'membership', 'event_id'),
|
||||
keyvalues={},
|
||||
batch_size=500,
|
||||
desc="_get_rules_for_member_event_ids",
|
||||
)
|
||||
|
||||
members = {
|
||||
row["event_id"]: (row["user_id"], row["membership"])
|
||||
for row in rows
|
||||
}
|
||||
|
||||
# If the event is a join event then it will be in the current state events
|
||||
# map but not in the DB, so we have to explicitly insert it.
|
||||
if event.type == EventTypes.Member:
|
||||
for event_id in member_event_ids.itervalues():
|
||||
if event_id == event.event_id:
|
||||
members[event_id] = (event.state_key, event.membership)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug("Found members %r: %r", self.room_id, members.values())
|
||||
|
||||
interested_in_user_ids = set(
|
||||
user_id for user_id, membership in members.itervalues()
|
||||
if membership == Membership.JOIN
|
||||
)
|
||||
|
||||
logger.debug("Joined: %r", interested_in_user_ids)
|
||||
|
||||
if_users_with_pushers = yield self.store.get_if_users_have_pushers(
|
||||
interested_in_user_ids,
|
||||
on_invalidate=self.invalidate_all_cb,
|
||||
)
|
||||
|
||||
user_ids = set(
|
||||
uid for uid, have_pusher in if_users_with_pushers.iteritems() if have_pusher
|
||||
)
|
||||
|
||||
logger.debug("With pushers: %r", user_ids)
|
||||
|
||||
users_with_receipts = yield self.store.get_users_with_read_receipts_in_room(
|
||||
self.room_id, on_invalidate=self.invalidate_all_cb,
|
||||
)
|
||||
|
||||
logger.debug("With receipts: %r", users_with_receipts)
|
||||
|
||||
# any users with read receipts must be ours: they have read receipts
|
||||
for uid in users_with_receipts:
|
||||
if uid in interested_in_user_ids:
|
||||
user_ids.add(uid)
|
||||
|
||||
rules_by_user = yield self.store.bulk_get_push_rules(
|
||||
user_ids, on_invalidate=self.invalidate_all_cb,
|
||||
)
|
||||
|
||||
ret_rules_by_user.update(
|
||||
item for item in rules_by_user.iteritems() if item[0] is not None
|
||||
)
|
||||
|
||||
self.update_cache(sequence, members, ret_rules_by_user, state_group)
|
||||
|
||||
def invalidate_all(self):
|
||||
# Note: Don't hand this function directly to an invalidation callback
|
||||
# as it keeps a reference to self and will stop this instance from being
|
||||
# GC'd if it gets dropped from the rules_to_user cache. Instead use
|
||||
# `self.invalidate_all_cb`
|
||||
logger.debug("Invalidating RulesForRoom for %r", self.room_id)
|
||||
self.sequence += 1
|
||||
self.state_group = object()
|
||||
self.member_map = {}
|
||||
self.rules_by_user = {}
|
||||
push_rules_invalidation_counter.inc()
|
||||
|
||||
def update_cache(self, sequence, members, rules_by_user, state_group):
|
||||
if sequence == self.sequence:
|
||||
self.member_map.update(members)
|
||||
self.rules_by_user = rules_by_user
|
||||
self.state_group = state_group
|
||||
|
||||
|
||||
class _Invalidation(namedtuple("_Invalidation", ("cache", "room_id"))):
|
||||
# We rely on _CacheContext implementing __eq__ and __hash__ sensibly,
|
||||
# which namedtuple does for us (i.e. two _CacheContext are the same if
|
||||
# their caches and keys match). This is important in particular to
|
||||
# dedupe when we add callbacks to lru cache nodes, otherwise the number
|
||||
# of callbacks would grow.
|
||||
def __call__(self):
|
||||
rules = self.cache.get(self.room_id, None, update_metrics=False)
|
||||
if rules:
|
||||
rules.invalidate_all()
|
||||
|
||||
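The `_Invalidation` trick is worth spelling out: the callback is a value-type (namedtuple) holding only the cache and key, so it compares equal by value and can be deduplicated, and it holds no reference to the `RulesForRoom` itself. A compact sketch with a plain dict cache:

```python
from collections import namedtuple

class _Invalidation(namedtuple("_Invalidation", ("cache", "key"))):
    # compares equal by value, so repeated registrations can be deduped
    def __call__(self):
        entry = self.cache.get(self.key)
        if entry is not None:
            entry.invalidate_all()

class Entry(object):
    def invalidate_all(self):
        print("invalidated")

cache = {"!room:example.com": Entry()}
cb = _Invalidation(cache, "!room:example.com")
assert cb == _Invalidation(cache, "!room:example.com")
cb()  # -> invalidated
```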
@@ -21,7 +21,6 @@ import logging
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
|
||||
from mailer import Mailer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -56,8 +55,10 @@ class EmailPusher(object):
|
||||
This shares quite a bit of code with httpusher: it would be good to
|
||||
factor out the common parts
|
||||
"""
|
||||
def __init__(self, hs, pusherdict):
|
||||
def __init__(self, hs, pusherdict, mailer):
|
||||
self.hs = hs
|
||||
self.mailer = mailer
|
||||
|
||||
self.store = self.hs.get_datastore()
|
||||
self.clock = self.hs.get_clock()
|
||||
self.pusher_id = pusherdict['id']
|
||||
@@ -73,16 +74,6 @@ class EmailPusher(object):
|
||||
|
||||
self.processing = False
|
||||
|
||||
if self.hs.config.email_enable_notifs:
|
||||
if 'data' in pusherdict and 'brand' in pusherdict['data']:
|
||||
app_name = pusherdict['data']['brand']
|
||||
else:
|
||||
app_name = self.hs.config.email_app_name
|
||||
|
||||
self.mailer = Mailer(self.hs, app_name)
|
||||
else:
|
||||
self.mailer = None
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_started(self):
|
||||
if self.mailer is not None:
|
||||
|
||||
@@ -244,6 +244,26 @@ class HttpPusher(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _build_notification_dict(self, event, tweaks, badge):
|
||||
if self.data.get('format') == 'event_id_only':
|
||||
d = {
|
||||
'notification': {
|
||||
'event_id': event.event_id,
|
||||
'room_id': event.room_id,
|
||||
'counts': {
|
||||
'unread': badge,
|
||||
},
|
||||
'devices': [
|
||||
{
|
||||
'app_id': self.app_id,
|
||||
'pushkey': self.pushkey,
|
||||
'pushkey_ts': long(self.pushkey_ts / 1000),
|
||||
'data': self.data_minus_url,
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
defer.returnValue(d)
|
||||
|
||||
ctx = yield push_tools.get_context_for_event(
|
||||
self.store, self.state_handler, event, self.user_id
|
||||
)
|
||||
@@ -275,7 +295,7 @@ class HttpPusher(object):
|
||||
if event.type == 'm.room.member':
|
||||
d['notification']['membership'] = event.content['membership']
|
||||
d['notification']['user_is_target'] = event.state_key == self.user_id
|
||||
if 'content' in event:
|
||||
if not self.hs.config.push_redact_content and 'content' in event:
|
||||
d['notification']['content'] = event.content
|
||||
|
||||
# We no longer send aliases separately, instead, we send the human
|
||||
|
||||
@@ -78,23 +78,17 @@ ALLOWED_ATTRS = {
|
||||
|
||||
|
||||
class Mailer(object):
|
||||
def __init__(self, hs, app_name):
|
||||
def __init__(self, hs, app_name, notif_template_html, notif_template_text):
|
||||
self.hs = hs
|
||||
self.notif_template_html = notif_template_html
|
||||
self.notif_template_text = notif_template_text
|
||||
|
||||
self.store = self.hs.get_datastore()
|
||||
self.macaroon_gen = self.hs.get_macaroon_generator()
|
||||
self.state_handler = self.hs.get_state_handler()
|
||||
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
|
||||
self.app_name = app_name
|
||||
|
||||
logger.info("Created Mailer for app_name %s" % app_name)
|
||||
env = jinja2.Environment(loader=loader)
|
||||
env.filters["format_ts"] = format_ts_filter
|
||||
env.filters["mxc_to_http"] = self.mxc_to_http_filter
|
||||
self.notif_template_html = env.get_template(
|
||||
self.hs.config.email_notif_template_html
|
||||
)
|
||||
self.notif_template_text = env.get_template(
|
||||
self.hs.config.email_notif_template_text
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send_notification_mail(self, app_id, user_id, email_address,
|
||||
@@ -481,28 +475,6 @@ class Mailer(object):
|
||||
urllib.urlencode(params),
|
||||
)
|
||||
|
||||
def mxc_to_http_filter(self, value, width, height, resize_method="crop"):
|
||||
if value[0:6] != "mxc://":
|
||||
return ""
|
||||
|
||||
serverAndMediaId = value[6:]
|
||||
fragment = None
|
||||
if '#' in serverAndMediaId:
|
||||
(serverAndMediaId, fragment) = serverAndMediaId.split('#', 1)
|
||||
fragment = "#" + fragment
|
||||
|
||||
params = {
|
||||
"width": width,
|
||||
"height": height,
|
||||
"method": resize_method,
|
||||
}
|
||||
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
|
||||
self.hs.config.public_baseurl,
|
||||
serverAndMediaId,
|
||||
urllib.urlencode(params),
|
||||
fragment or "",
|
||||
)
|
||||
|
||||
|
||||
def safe_markup(raw_html):
|
||||
return jinja2.Markup(bleach.linkify(bleach.clean(
|
||||
@@ -543,3 +515,52 @@ def string_ordinal_total(s):
|
||||
|
||||
def format_ts_filter(value, format):
|
||||
return time.strftime(format, time.localtime(value / 1000))
|
||||
|
||||
|
||||
def load_jinja2_templates(config):
|
||||
"""Load the jinja2 email templates from disk
|
||||
|
||||
Returns:
|
||||
(notif_template_html, notif_template_text)
|
||||
"""
|
||||
logger.info("loading jinja2")
|
||||
|
||||
loader = jinja2.FileSystemLoader(config.email_template_dir)
|
||||
env = jinja2.Environment(loader=loader)
|
||||
env.filters["format_ts"] = format_ts_filter
|
||||
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
|
||||
|
||||
notif_template_html = env.get_template(
|
||||
config.email_notif_template_html
|
||||
)
|
||||
notif_template_text = env.get_template(
|
||||
config.email_notif_template_text
|
||||
)
|
||||
|
||||
return notif_template_html, notif_template_text
|
||||
|
||||
|
||||
def _create_mxc_to_http_filter(config):
|
||||
def mxc_to_http_filter(value, width, height, resize_method="crop"):
|
||||
if value[0:6] != "mxc://":
|
||||
return ""
|
||||
|
||||
serverAndMediaId = value[6:]
|
||||
fragment = None
|
||||
if '#' in serverAndMediaId:
|
||||
(serverAndMediaId, fragment) = serverAndMediaId.split('#', 1)
|
||||
fragment = "#" + fragment
|
||||
|
||||
params = {
|
||||
"width": width,
|
||||
"height": height,
|
||||
"method": resize_method,
|
||||
}
|
||||
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
|
||||
config.public_baseurl,
|
||||
serverAndMediaId,
|
||||
urllib.urlencode(params),
|
||||
fragment or "",
|
||||
)
|
||||
|
||||
return mxc_to_http_filter
|
||||
|
||||
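`_create_mxc_to_http_filter` shows the closure-factory pattern for jinja2 filters: capture the config once, return a plain function, register it on the environment. The same shape on a toy filter (names invented for illustration):

```python
import jinja2

def _create_prefix_filter(prefix):
    # the closure captures `prefix`, so no stateful object is needed
    def prefix_filter(value):
        return "%s%s" % (prefix, value)
    return prefix_filter

env = jinja2.Environment()
env.filters["branded"] = _create_prefix_filter("matrix-")
print(env.from_string("{{ 'client' | branded }}").render())  # matrix-client
```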
@@ -200,7 +200,9 @@ def _glob_to_re(glob, word_boundary):
return re.compile(r, flags=re.IGNORECASE)


def _flatten_dict(d, prefix=[], result={}):
def _flatten_dict(d, prefix=[], result=None):
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, basestring):
result[".".join(prefix + [key])] = value.lower()

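The `_flatten_dict` change fixes Python's shared-mutable-default pitfall: a default `{}` is created once at `def` time and leaks state across calls. A self-contained demonstration:

```python
def flatten_bad(d, result={}):      # one dict shared by every call
    result.update(d)
    return result

def flatten_good(d, result=None):   # fresh dict per call
    if result is None:
        result = {}
    result.update(d)
    return result

print(flatten_bad({"a": 1}))   # {'a': 1}
print(flatten_bad({"b": 2}))   # {'a': 1, 'b': 2}  <- state leaked
print(flatten_good({"a": 1}))  # {'a': 1}
print(flatten_good({"b": 2}))  # {'b': 2}
```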
@@ -26,22 +26,54 @@ logger = logging.getLogger(__name__)
|
||||
# process works fine)
|
||||
try:
|
||||
from synapse.push.emailpusher import EmailPusher
|
||||
from synapse.push.mailer import Mailer, load_jinja2_templates
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def create_pusher(hs, pusherdict):
|
||||
logger.info("trying to create_pusher for %r", pusherdict)
|
||||
class PusherFactory(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
|
||||
PUSHER_TYPES = {
|
||||
"http": HttpPusher,
|
||||
}
|
||||
self.pusher_types = {
|
||||
"http": HttpPusher,
|
||||
}
|
||||
|
||||
logger.info("email enable notifs: %r", hs.config.email_enable_notifs)
|
||||
if hs.config.email_enable_notifs:
|
||||
PUSHER_TYPES["email"] = EmailPusher
|
||||
logger.info("defined email pusher type")
|
||||
logger.info("email enable notifs: %r", hs.config.email_enable_notifs)
|
||||
if hs.config.email_enable_notifs:
|
||||
self.mailers = {} # app_name -> Mailer
|
||||
|
||||
if pusherdict['kind'] in PUSHER_TYPES:
|
||||
logger.info("found pusher")
|
||||
return PUSHER_TYPES[pusherdict['kind']](hs, pusherdict)
|
||||
templates = load_jinja2_templates(hs.config)
|
||||
self.notif_template_html, self.notif_template_text = templates
|
||||
|
||||
self.pusher_types["email"] = self._create_email_pusher
|
||||
|
||||
logger.info("defined email pusher type")
|
||||
|
||||
def create_pusher(self, pusherdict):
|
||||
logger.info("trying to create_pusher for %r", pusherdict)
|
||||
|
||||
if pusherdict['kind'] in self.pusher_types:
|
||||
logger.info("found pusher")
|
||||
return self.pusher_types[pusherdict['kind']](self.hs, pusherdict)
|
||||
|
||||
def _create_email_pusher(self, _hs, pusherdict):
|
||||
app_name = self._app_name_from_pusherdict(pusherdict)
|
||||
mailer = self.mailers.get(app_name)
|
||||
if not mailer:
|
||||
mailer = Mailer(
|
||||
hs=self.hs,
|
||||
app_name=app_name,
|
||||
notif_template_html=self.notif_template_html,
|
||||
notif_template_text=self.notif_template_text,
|
||||
)
|
||||
self.mailers[app_name] = mailer
|
||||
return EmailPusher(self.hs, pusherdict, mailer)
|
||||
|
||||
def _app_name_from_pusherdict(self, pusherdict):
|
||||
if 'data' in pusherdict and 'brand' in pusherdict['data']:
|
||||
app_name = pusherdict['data']['brand']
|
||||
else:
|
||||
app_name = self.hs.config.email_app_name
|
||||
|
||||
return app_name
|
||||
|
||||
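A design note on `PusherFactory`: classes and bound methods are both callables taking `(hs, pusherdict)`, so they can share one dispatch dict, and the email entry points at a method that does extra setup (one shared mailer per app name) before constructing. A runnable skeleton of that shape (the mailer lookup is simplified; names are illustrative):

```python
class HttpPusher(object):
    def __init__(self, hs, pusherdict):
        self.kind = "http"

class EmailPusher(object):
    def __init__(self, hs, pusherdict, mailer):
        self.kind, self.mailer = "email", mailer

class Factory(object):
    def __init__(self, hs):
        self.hs = hs
        self.mailers = {}  # app_name -> shared mailer object
        self.makers = {
            "http": HttpPusher,          # the class itself is the maker
            "email": self._make_email,   # bound method with extra setup
        }

    def create(self, pusherdict):
        maker = self.makers.get(pusherdict["kind"])
        if maker:
            return maker(self.hs, pusherdict)

    def _make_email(self, _hs, pusherdict):
        app_name = pusherdict.get("brand", "default")
        mailer = self.mailers.setdefault(app_name, object())
        return EmailPusher(self.hs, pusherdict, mailer)

f = Factory(hs=None)
p1 = f.create({"kind": "email", "brand": "acme"})
p2 = f.create({"kind": "email", "brand": "acme"})
assert p1.mailer is p2.mailer  # one mailer per app name
```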
@@ -16,7 +16,7 @@
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import pusher
|
||||
from .pusher import PusherFactory
|
||||
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
|
||||
from synapse.util.async import run_on_reactor
|
||||
|
||||
@@ -28,6 +28,7 @@ logger = logging.getLogger(__name__)
|
||||
class PusherPool:
|
||||
def __init__(self, _hs):
|
||||
self.hs = _hs
|
||||
self.pusher_factory = PusherFactory(_hs)
|
||||
self.start_pushers = _hs.config.start_pushers
|
||||
self.store = self.hs.get_datastore()
|
||||
self.clock = self.hs.get_clock()
|
||||
@@ -48,7 +49,7 @@ class PusherPool:
|
||||
# will then get pulled out of the database,
|
||||
# recreated, added and started: this means we have only one
|
||||
# code path adding pushers.
|
||||
pusher.create_pusher(self.hs, {
|
||||
self.pusher_factory.create_pusher({
|
||||
"id": None,
|
||||
"user_name": user_id,
|
||||
"kind": kind,
|
||||
@@ -186,7 +187,7 @@ class PusherPool:
|
||||
logger.info("Starting %d pushers", len(pushers))
|
||||
for pusherdict in pushers:
|
||||
try:
|
||||
p = pusher.create_pusher(self.hs, pusherdict)
|
||||
p = self.pusher_factory.create_pusher(pusherdict)
|
||||
except:
|
||||
logger.exception("Couldn't start a pusher: caught Exception")
|
||||
continue
|
||||
|
||||
@@ -31,7 +31,7 @@ REQUIREMENTS = {
|
||||
"pyyaml": ["yaml"],
|
||||
"pyasn1": ["pyasn1"],
|
||||
"daemonize": ["daemonize"],
|
||||
"py-bcrypt": ["bcrypt"],
|
||||
"bcrypt": ["bcrypt"],
|
||||
"pillow": ["PIL"],
|
||||
"pydenticon": ["pydenticon"],
|
||||
"ujson": ["ujson"],
|
||||
@@ -40,6 +40,7 @@ REQUIREMENTS = {
|
||||
"pymacaroons-pynacl": ["pymacaroons"],
|
||||
"msgpack-python>=0.3.0": ["msgpack"],
|
||||
"phonenumbers>=8.2.0": ["phonenumbers"],
|
||||
"affinity": ["affinity"],
|
||||
}
|
||||
CONDITIONAL_REQUIREMENTS = {
|
||||
"web_client": {
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
from ._base import BaseSlavedStore
|
||||
from synapse.storage import DataStore
|
||||
from synapse.config.appservice import load_appservices
|
||||
from synapse.storage.appservice import _make_exclusive_regex
|
||||
|
||||
|
||||
class SlavedApplicationServiceStore(BaseSlavedStore):
|
||||
@@ -25,6 +26,7 @@ class SlavedApplicationServiceStore(BaseSlavedStore):
|
||||
hs.config.server_name,
|
||||
hs.config.app_service_config_files
|
||||
)
|
||||
self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)
|
||||
|
||||
get_app_service_by_token = DataStore.get_app_service_by_token.__func__
|
||||
get_app_service_by_user_id = DataStore.get_app_service_by_user_id.__func__
|
||||
@@ -38,3 +40,6 @@ class SlavedApplicationServiceStore(BaseSlavedStore):
|
||||
get_appservice_state = DataStore.get_appservice_state.__func__
|
||||
set_appservice_last_pos = DataStore.set_appservice_last_pos.__func__
|
||||
set_appservice_state = DataStore.set_appservice_state.__func__
|
||||
get_if_app_services_interested_in_user = (
|
||||
DataStore.get_if_app_services_interested_in_user.__func__
|
||||
)
|
||||
|
||||
synapse/replication/slave/storage/client_ips.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import BaseSlavedStore
from synapse.storage.client_ips import LAST_SEEN_GRANULARITY
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.caches.descriptors import Cache


class SlavedClientIpStore(BaseSlavedStore):
def __init__(self, db_conn, hs):
super(SlavedClientIpStore, self).__init__(db_conn, hs)

self.client_ip_last_seen = Cache(
name="client_ip_last_seen",
keylen=4,
max_entries=50000 * CACHE_SIZE_FACTOR,
)

def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
now = int(self._clock.time_msec())
key = (user_id, access_token, ip)

try:
last_seen = self.client_ip_last_seen.get(key)
except KeyError:
last_seen = None

# Rate-limited inserts
if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
return

self.hs.get_tcp_replication().send_user_ip(
user_id, access_token, ip, user_agent, device_id, now
)
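The worker-side rate limiting above (skip the replication send if the same `(user_id, token, ip)` was seen within `LAST_SEEN_GRANULARITY`) is last-write-wins suppression by key. A standalone sketch; the 120-second window here is an assumption for illustration:

```python
import time

LAST_SEEN_GRANULARITY = 120 * 1000  # ms; assumed window for this sketch

class SuppressedWriter(object):
    def __init__(self):
        self._last_seen = {}  # key -> ms timestamp of last accepted write

    def maybe_write(self, key, write_fn):
        now = int(time.time() * 1000)
        last = self._last_seen.get(key)
        if last is not None and (now - last) < LAST_SEEN_GRANULARITY:
            return False  # within the window: suppress the write
        self._last_seen[key] = now
        write_fn()
        return True

w = SuppressedWriter()
print(w.maybe_write(("@u:hs", "tok", "10.0.0.1"), lambda: None))  # True
print(w.maybe_write(("@u:hs", "tok", "10.0.0.1"), lambda: None))  # False
```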
@@ -16,6 +16,7 @@
|
||||
from ._base import BaseSlavedStore
|
||||
from ._slaved_id_tracker import SlavedIdTracker
|
||||
from synapse.storage import DataStore
|
||||
from synapse.storage.end_to_end_keys import EndToEndKeyStore
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
|
||||
|
||||
@@ -45,6 +46,7 @@ class SlavedDeviceStore(BaseSlavedStore):
|
||||
_mark_as_sent_devices_by_remote_txn = (
|
||||
DataStore._mark_as_sent_devices_by_remote_txn.__func__
|
||||
)
|
||||
count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"]
|
||||
|
||||
def stream_positions(self):
|
||||
result = super(SlavedDeviceStore, self).stream_positions()
|
||||
|
||||
@@ -108,6 +108,8 @@ class SlavedEventStore(BaseSlavedStore):
|
||||
get_current_state_ids = (
|
||||
StateStore.__dict__["get_current_state_ids"]
|
||||
)
|
||||
get_state_group_delta = StateStore.__dict__["get_state_group_delta"]
|
||||
_get_joined_hosts_cache = RoomMemberStore.__dict__["_get_joined_hosts_cache"]
|
||||
has_room_changed_since = DataStore.has_room_changed_since.__func__
|
||||
|
||||
get_unread_push_actions_for_user_in_range_for_http = (
|
||||
@@ -151,8 +153,7 @@ class SlavedEventStore(BaseSlavedStore):
|
||||
get_room_events_stream_for_rooms = (
|
||||
DataStore.get_room_events_stream_for_rooms.__func__
|
||||
)
|
||||
is_host_joined = DataStore.is_host_joined.__func__
|
||||
_is_host_joined = RoomMemberStore.__dict__["_is_host_joined"]
|
||||
is_host_joined = RoomMemberStore.__dict__["is_host_joined"]
|
||||
get_stream_token_for_event = DataStore.get_stream_token_for_event.__func__
|
||||
|
||||
_set_before_and_after = staticmethod(DataStore._set_before_and_after)
|
||||
|
||||
@@ -20,6 +20,7 @@ from twisted.internet.protocol import ReconnectingClientFactory
|
||||
|
||||
from .commands import (
|
||||
FederationAckCommand, UserSyncCommand, RemovePusherCommand, InvalidateCacheCommand,
|
||||
UserIpCommand,
|
||||
)
|
||||
from .protocol import ClientReplicationStreamProtocol
|
||||
|
||||
@@ -178,6 +179,12 @@ class ReplicationClientHandler(object):
|
||||
cmd = InvalidateCacheCommand(cache_func.__name__, keys)
|
||||
self.send_command(cmd)
|
||||
|
||||
def send_user_ip(self, user_id, access_token, ip, user_agent, device_id, last_seen):
|
||||
"""Tell the master that the user made a request.
|
||||
"""
|
||||
cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen)
|
||||
self.send_command(cmd)
|
||||
|
||||
def await_sync(self, data):
|
||||
"""Returns a deferred that is resolved when we receive a SYNC command
|
||||
with given data.
|
||||
|
||||
@@ -304,6 +304,40 @@ class InvalidateCacheCommand(Command):
return " ".join((self.cache_func, json.dumps(self.keys)))


class UserIpCommand(Command):
"""Sent periodically when a worker sees activity from a client.

Format::

USER_IP <user_id>, <access_token>, <ip>, <user_agent>, <device_id>, <last_seen>
"""
NAME = "USER_IP"

def __init__(self, user_id, access_token, ip, user_agent, device_id, last_seen):
self.user_id = user_id
self.access_token = access_token
self.ip = ip
self.user_agent = user_agent
self.device_id = device_id
self.last_seen = last_seen

@classmethod
def from_line(cls, line):
user_id, jsn = line.split(" ", 1)

access_token, ip, user_agent, device_id, last_seen = json.loads(jsn)

return cls(
user_id, access_token, ip, user_agent, device_id, last_seen
)

def to_line(self):
return self.user_id + " " + json.dumps((
self.access_token, self.ip, self.user_agent, self.device_id,
self.last_seen,
))

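A quick round trip through the wire format defined above (all values are made up):

```python
cmd = UserIpCommand(
    "@alice:example.com", "syt_abc", "10.1.2.3",
    "Mozilla/5.0", "ADEVICE", 1500000000000,
)
line = cmd.to_line()
# '@alice:example.com ["syt_abc", "10.1.2.3", "Mozilla/5.0", "ADEVICE", 1500000000000]'
parsed = UserIpCommand.from_line(line)
assert (parsed.user_id, parsed.ip) == ("@alice:example.com", "10.1.2.3")
```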
# Map of command name to command type.
|
||||
COMMAND_MAP = {
|
||||
cmd.NAME: cmd
|
||||
@@ -320,6 +354,7 @@ COMMAND_MAP = {
|
||||
SyncCommand,
|
||||
RemovePusherCommand,
|
||||
InvalidateCacheCommand,
|
||||
UserIpCommand,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -342,5 +377,6 @@ VALID_CLIENT_COMMANDS = (
|
||||
FederationAckCommand.NAME,
|
||||
RemovePusherCommand.NAME,
|
||||
InvalidateCacheCommand.NAME,
|
||||
UserIpCommand.NAME,
|
||||
ErrorCommand.NAME,
|
||||
)
|
||||
|
||||
@@ -244,7 +244,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
|
||||
becoming full.
|
||||
"""
|
||||
if self.state == ConnectionStates.CLOSED:
|
||||
logger.info("[%s] Not sending, connection closed", self.id())
|
||||
logger.debug("[%s] Not sending, connection closed", self.id())
|
||||
return
|
||||
|
||||
if do_buffer and self.state != ConnectionStates.ESTABLISHED:
|
||||
@@ -264,7 +264,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
|
||||
def _queue_command(self, cmd):
|
||||
"""Queue the command until the connection is ready to write to again.
|
||||
"""
|
||||
logger.info("[%s] Queing as conn %r, cmd: %r", self.id(), self.state, cmd)
|
||||
logger.debug("[%s] Queing as conn %r, cmd: %r", self.id(), self.state, cmd)
|
||||
self.pending_commands.append(cmd)
|
||||
|
||||
if len(self.pending_commands) > self.max_line_buffer:
|
||||
@@ -406,6 +406,12 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
|
||||
def on_INVALIDATE_CACHE(self, cmd):
|
||||
self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
|
||||
|
||||
def on_USER_IP(self, cmd):
|
||||
self.streamer.on_user_ip(
|
||||
cmd.user_id, cmd.access_token, cmd.ip, cmd.user_agent, cmd.device_id,
|
||||
cmd.last_seen,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def subscribe_to_stream(self, stream_name, token):
|
||||
"""Subscribe the remote to a streams.
|
||||
|
||||
@@ -35,6 +35,7 @@ user_sync_counter = metrics.register_counter("user_sync")
|
||||
federation_ack_counter = metrics.register_counter("federation_ack")
|
||||
remove_pusher_counter = metrics.register_counter("remove_pusher")
|
||||
invalidate_cache_counter = metrics.register_counter("invalidate_cache")
|
||||
user_ip_cache_counter = metrics.register_counter("user_ip_cache")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -67,6 +68,7 @@ class ReplicationStreamer(object):
|
||||
self.store = hs.get_datastore()
|
||||
self.presence_handler = hs.get_presence_handler()
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
# Current connections.
|
||||
self.connections = []
|
||||
@@ -99,7 +101,7 @@ class ReplicationStreamer(object):
|
||||
if not hs.config.send_federation:
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
hs.get_notifier().add_replication_callback(self.on_notifier_poke)
|
||||
self.notifier.add_replication_callback(self.on_notifier_poke)
|
||||
|
||||
# Keeps track of whether we are currently checking for updates
|
||||
self.is_looping = False
|
||||
@@ -237,6 +239,15 @@ class ReplicationStreamer(object):
|
||||
invalidate_cache_counter.inc()
|
||||
getattr(self.store, cache_func).invalidate(tuple(keys))
|
||||
|
||||
@measure_func("repl.on_user_ip")
|
||||
def on_user_ip(self, user_id, access_token, ip, user_agent, device_id, last_seen):
|
||||
"""The client saw a user request
|
||||
"""
|
||||
user_ip_cache_counter.inc()
|
||||
self.store.insert_client_ip(
|
||||
user_id, access_token, ip, user_agent, device_id, last_seen,
|
||||
)
|
||||
|
||||
def send_sync_to_all_connections(self, data):
|
||||
"""Sends a SYNC command to all clients.
|
||||
|
||||
|
||||
@@ -112,6 +112,12 @@ AccountDataStreamRow = namedtuple("AccountDataStream", (
|
||||
"data_type", # str
|
||||
"data", # dict
|
||||
))
|
||||
CurrentStateDeltaStreamRow = namedtuple("CurrentStateDeltaStream", (
|
||||
"room_id", # str
|
||||
"type", # str
|
||||
"state_key", # str
|
||||
"event_id", # str, optional
|
||||
))
|
||||
|
||||
|
||||
class Stream(object):
|
||||
@@ -443,6 +449,21 @@ class AccountDataStream(Stream):
|
||||
defer.returnValue(results)
|
||||
|
||||
|
||||
class CurrentStateDeltaStream(Stream):
|
||||
"""Current state for a room was changed
|
||||
"""
|
||||
NAME = "current_state_deltas"
|
||||
ROW_TYPE = CurrentStateDeltaStreamRow
|
||||
|
||||
def __init__(self, hs):
|
||||
store = hs.get_datastore()
|
||||
|
||||
self.current_token = store.get_max_current_state_delta_stream_id
|
||||
self.update_function = store.get_all_updated_current_state_deltas
|
||||
|
||||
super(CurrentStateDeltaStream, self).__init__(hs)
|
||||
|
||||
|
||||
STREAMS_MAP = {
|
||||
stream.NAME: stream
|
||||
for stream in (
|
||||
@@ -460,5 +481,6 @@ STREAMS_MAP = {
|
||||
FederationStream,
|
||||
TagAccountDataStream,
|
||||
AccountDataStream,
|
||||
CurrentStateDeltaStream,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ from synapse.rest.client.v2_alpha import (
|
||||
devices,
|
||||
thirdparty,
|
||||
sendtodevice,
|
||||
user_directory,
|
||||
)
|
||||
|
||||
from synapse.http.server import JsonResource
|
||||
@@ -100,3 +101,4 @@ class ClientRestResource(JsonResource):
|
||||
devices.register_servlets(hs, client_resource)
|
||||
thirdparty.register_servlets(hs, client_resource)
|
||||
sendtodevice.register_servlets(hs, client_resource)
|
||||
user_directory.register_servlets(hs, client_resource)
|
||||
|
||||
@@ -15,8 +15,9 @@
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import Membership
|
||||
from synapse.api.errors import AuthError, SynapseError
|
||||
from synapse.types import UserID
|
||||
from synapse.types import UserID, create_requester
|
||||
from synapse.http.servlet import parse_json_object_from_request
|
||||
|
||||
from .base import ClientV1RestServlet, client_path_patterns
|
||||
@@ -157,9 +158,145 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
|
||||
defer.returnValue((200, {}))
|
||||
|
||||
|
||||
class ShutdownRoomRestServlet(ClientV1RestServlet):
|
||||
"""Shuts down a room by removing all local users from the room and blocking
|
||||
all future invites and joins to the room. Any local aliases will be repointed
|
||||
to a new room created by `new_room_user_id` and kicked users will be auto
|
||||
joined to the new room.
|
||||
"""
|
||||
PATTERNS = client_path_patterns("/admin/shutdown_room/(?P<room_id>[^/]+)")
|
||||
|
||||
DEFAULT_MESSAGE = (
|
||||
"Sharing illegal content on this server is not permitted and rooms in"
|
||||
" violation will be blocked."
|
||||
)
|
||||
|
||||
def __init__(self, hs):
|
||||
super(ShutdownRoomRestServlet, self).__init__(hs)
|
||||
self.store = hs.get_datastore()
|
||||
self.handlers = hs.get_handlers()
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request, room_id):
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
is_admin = yield self.auth.is_server_admin(requester.user)
|
||||
if not is_admin:
|
||||
raise AuthError(403, "You are not a server admin")
|
||||
|
||||
content = parse_json_object_from_request(request)
|
||||
|
||||
new_room_user_id = content.get("new_room_user_id")
|
||||
if not new_room_user_id:
|
||||
raise SynapseError(400, "Please provide field `new_room_user_id`")
|
||||
|
||||
room_creator_requester = create_requester(new_room_user_id)
|
||||
|
||||
message = content.get("message", self.DEFAULT_MESSAGE)
|
||||
room_name = content.get("room_name", "Content Violation Notification")
|
||||
|
||||
info = yield self.handlers.room_creation_handler.create_room(
|
||||
room_creator_requester,
|
||||
config={
|
||||
"preset": "public_chat",
|
||||
"name": room_name,
|
||||
"power_level_content_override": {
|
||||
"users_default": -10,
|
||||
},
|
||||
},
|
||||
ratelimit=False,
|
||||
)
|
||||
new_room_id = info["room_id"]
|
||||
|
||||
msg_handler = self.handlers.message_handler
|
||||
yield msg_handler.create_and_send_nonmember_event(
|
||||
room_creator_requester,
|
||||
{
|
||||
"type": "m.room.message",
|
||||
"content": {"body": message, "msgtype": "m.text"},
|
||||
"room_id": new_room_id,
|
||||
"sender": new_room_user_id,
|
||||
},
|
||||
ratelimit=False,
|
||||
)
|
||||
|
||||
requester_user_id = requester.user.to_string()
|
||||
|
||||
logger.info("Shutting down room %r", room_id)
|
||||
|
||||
yield self.store.block_room(room_id, requester_user_id)
|
||||
|
||||
users = yield self.state.get_current_user_in_room(room_id)
|
||||
kicked_users = []
|
||||
for user_id in users:
|
||||
if not self.hs.is_mine_id(user_id):
|
||||
continue
|
||||
|
||||
logger.info("Kicking %r from %r...", user_id, room_id)
|
||||
|
||||
target_requester = create_requester(user_id)
|
||||
yield self.handlers.room_member_handler.update_membership(
|
||||
requester=target_requester,
|
||||
target=target_requester.user,
|
||||
room_id=room_id,
|
||||
action=Membership.LEAVE,
|
||||
content={},
|
||||
ratelimit=False
|
||||
)
|
||||
|
||||
yield self.handlers.room_member_handler.forget(target_requester.user, room_id)
|
||||
|
||||
yield self.handlers.room_member_handler.update_membership(
|
||||
requester=target_requester,
|
||||
target=target_requester.user,
|
||||
room_id=new_room_id,
|
||||
action=Membership.JOIN,
|
||||
content={},
|
||||
ratelimit=False
|
||||
)
|
||||
|
||||
kicked_users.append(user_id)
|
||||
|
||||
aliases_for_room = yield self.store.get_aliases_for_room(room_id)
|
||||
|
||||
yield self.store.update_aliases_for_room(
|
||||
room_id, new_room_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, {
|
||||
"kicked_users": kicked_users,
|
||||
"local_aliases": aliases_for_room,
|
||||
"new_room_id": new_room_id,
|
||||
}))
|
||||
|
||||
|
||||
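Invoking the new admin endpoint, as a sketch (server URL, token, and room id are placeholders; the path comes from the servlet's PATTERNS, and Python 2 stdlib is used to match the codebase):

```python
import json
import urllib2

url = (
    "http://localhost:8008/_matrix/client/api/v1/admin/shutdown_room/"
    "!badroom:example.com?access_token=ADMIN_TOKEN"
)
body = json.dumps({
    "new_room_user_id": "@abuse:example.com",           # required
    "room_name": "Content Violation Notification",      # optional
    "message": "This room has been shut down.",         # optional
})
req = urllib2.Request(url, body, {"Content-Type": "application/json"})
resp = json.load(urllib2.urlopen(req))
print(resp)  # kicked_users, local_aliases, new_room_id
```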
class QuarantineMediaInRoom(ClientV1RestServlet):
|
||||
"""Quarantines all media in a room so that no one can download it via
|
||||
this server.
|
||||
"""
|
||||
PATTERNS = client_path_patterns("/admin/quarantine_media/(?P<room_id>[^/]+)")
|
||||
|
||||
def __init__(self, hs):
|
||||
super(QuarantineMediaInRoom, self).__init__(hs)
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request, room_id):
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
is_admin = yield self.auth.is_server_admin(requester.user)
|
||||
if not is_admin:
|
||||
raise AuthError(403, "You are not a server admin")
|
||||
|
||||
num_quarantined = yield self.store.quarantine_media_ids_in_room(
|
||||
room_id, requester.user.to_string(),
|
||||
)
|
||||
|
||||
defer.returnValue((200, {"num_quarantined": num_quarantined}))
|
||||
|
||||
|
||||
class ResetPasswordRestServlet(ClientV1RestServlet):
|
||||
"""Post request to allow an administrator reset password for a user.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
Example:
|
||||
http://localhost:8008/_matrix/client/api/v1/admin/reset_password/
|
||||
@user:to_reset_password?access_token=admin_access_token
|
||||
@@ -182,7 +319,7 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request, target_user_id):
|
||||
"""Post request to allow an administrator reset password for a user.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
"""
|
||||
UserID.from_string(target_user_id)
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
@@ -206,7 +343,7 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
|
||||
|
||||
class GetUsersPaginatedRestServlet(ClientV1RestServlet):
|
||||
"""Get request to get specific number of users from Synapse.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
Example:
|
||||
http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
|
||||
@admin:user?access_token=admin_access_token&start=0&limit=10
|
||||
@@ -225,7 +362,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, request, target_user_id):
|
||||
"""Get request to get specific number of users from Synapse.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
"""
|
||||
target_user = UserID.from_string(target_user_id)
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
@@ -258,7 +395,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request, target_user_id):
|
||||
"""Post request to get specific number of users from Synapse..
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
Example:
|
||||
http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
|
||||
@admin:user?access_token=admin_access_token
|
||||
@@ -296,7 +433,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
|
||||
class SearchUsersRestServlet(ClientV1RestServlet):
|
||||
"""Get request to search user table for specific users according to
|
||||
search term.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
Example:
|
||||
http://localhost:8008/_matrix/client/api/v1/admin/search_users/
|
||||
@admin:user?access_token=admin_access_token&term=alice
|
||||
@@ -316,7 +453,7 @@ class SearchUsersRestServlet(ClientV1RestServlet):
|
||||
def on_GET(self, request, target_user_id):
|
||||
"""Get request to search user table for specific users according to
|
||||
search term.
|
||||
This need a user have a administrator access in Synapse.
|
||||
This needs user to have administrator access in Synapse.
|
||||
"""
|
||||
target_user = UserID.from_string(target_user_id)
|
||||
requester = yield self.auth.get_user_by_req(request)
|
||||
@@ -353,3 +490,5 @@ def register_servlets(hs, http_server):
|
||||
ResetPasswordRestServlet(hs).register(http_server)
|
||||
GetUsersPaginatedRestServlet(hs).register(http_server)
|
||||
SearchUsersRestServlet(hs).register(http_server)
|
||||
ShutdownRoomRestServlet(hs).register(http_server)
|
||||
QuarantineMediaInRoom(hs).register(http_server)
|
||||
|
||||
@@ -73,6 +73,7 @@ class PushersSetRestServlet(ClientV1RestServlet):
    def __init__(self, hs):
        super(PushersSetRestServlet, self).__init__(hs)
        self.notifier = hs.get_notifier()
        self.pusher_pool = self.hs.get_pusherpool()

    @defer.inlineCallbacks
    def on_POST(self, request):

@@ -81,12 +82,10 @@ class PushersSetRestServlet(ClientV1RestServlet):

        content = parse_json_object_from_request(request)

        pusher_pool = self.hs.get_pusherpool()

        if ('pushkey' in content and 'app_id' in content
                and 'kind' in content and
                content['kind'] is None):
            yield pusher_pool.remove_pusher(
            yield self.pusher_pool.remove_pusher(
                content['app_id'], content['pushkey'], user_id=user.to_string()
            )
            defer.returnValue((200, {}))

@@ -109,14 +108,14 @@ class PushersSetRestServlet(ClientV1RestServlet):
            append = content['append']

        if not append:
            yield pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
            yield self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                app_id=content['app_id'],
                pushkey=content['pushkey'],
                not_user_id=user.to_string()
            )

        try:
            yield pusher_pool.add_pusher(
            yield self.pusher_pool.add_pusher(
                user_id=user.to_string(),
                access_token=requester.access_token_id,
                kind=content['kind'],

@@ -152,6 +151,7 @@ class PushersRemoveRestServlet(RestServlet):
        self.hs = hs
        self.notifier = hs.get_notifier()
        self.auth = hs.get_v1auth()
        self.pusher_pool = self.hs.get_pusherpool()

    @defer.inlineCallbacks
    def on_GET(self, request):

@@ -161,10 +161,8 @@ class PushersRemoveRestServlet(RestServlet):
        app_id = parse_string(request, "app_id", required=True)
        pushkey = parse_string(request, "pushkey", required=True)

        pusher_pool = self.hs.get_pusherpool()

        try:
            yield pusher_pool.remove_pusher(
            yield self.pusher_pool.remove_pusher(
                app_id=app_id,
                pushkey=pushkey,
                user_id=user.to_string(),

@@ -398,22 +398,18 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet):

    def __init__(self, hs):
        super(JoinedRoomMemberListRestServlet, self).__init__(hs)
        self.state = hs.get_state_handler()
        self.message_handler = hs.get_handlers().message_handler

    @defer.inlineCallbacks
    def on_GET(self, request, room_id):
        yield self.auth.get_user_by_req(request)
        requester = yield self.auth.get_user_by_req(request)

        users_with_profile = yield self.state.get_current_user_in_room(room_id)
        users_with_profile = yield self.message_handler.get_joined_members(
            requester, room_id,
        )

        defer.returnValue((200, {
            "joined": {
                user_id: {
                    "avatar_url": profile.avatar_url,
                    "display_name": profile.display_name,
                }
                for user_id, profile in users_with_profile.iteritems()
            }
            "joined": users_with_profile,
        }))


@@ -188,13 +188,11 @@ class KeyChangesServlet(RestServlet):

        user_id = requester.user.to_string()

        changed = yield self.device_handler.get_user_ids_changed(
        results = yield self.device_handler.get_user_ids_changed(
            user_id, from_token,
        )

        defer.returnValue((200, {
            "changed": list(changed),
        }))
        defer.returnValue((200, results))


class OneTimeKeyServlet(RestServlet):

@@ -110,7 +110,7 @@ class SyncRestServlet(RestServlet):
        filter_id = parse_string(request, "filter", default=None)
        full_state = parse_boolean(request, "full_state", default=False)

        logger.info(
        logger.debug(
            "/sync: user=%r, timeout=%r, since=%r,"
            " set_presence=%r, filter_id=%r, device_id=%r" % (
                user, timeout, since, set_presence, filter_id, device_id

@@ -164,27 +164,35 @@ class SyncRestServlet(RestServlet):
        )

        time_now = self.clock.time_msec()

        joined = self.encode_joined(
            sync_result.joined, time_now, requester.access_token_id, filter.event_fields
        response_content = self.encode_response(
            time_now, sync_result, requester.access_token_id, filter
        )

        invited = self.encode_invited(
            sync_result.invited, time_now, requester.access_token_id
        defer.returnValue((200, response_content))

    @staticmethod
    def encode_response(time_now, sync_result, access_token_id, filter):
        joined = SyncRestServlet.encode_joined(
            sync_result.joined, time_now, access_token_id, filter.event_fields
        )

        archived = self.encode_archived(
            sync_result.archived, time_now, requester.access_token_id,
        invited = SyncRestServlet.encode_invited(
            sync_result.invited, time_now, access_token_id,
        )

        archived = SyncRestServlet.encode_archived(
            sync_result.archived, time_now, access_token_id,
            filter.event_fields,
        )

        response_content = {
        return {
            "account_data": {"events": sync_result.account_data},
            "to_device": {"events": sync_result.to_device},
            "device_lists": {
                "changed": list(sync_result.device_lists),
                "changed": list(sync_result.device_lists.changed),
                "left": list(sync_result.device_lists.left),
            },
            "presence": self.encode_presence(
            "presence": SyncRestServlet.encode_presence(
                sync_result.presence, time_now
            ),
            "rooms": {

@@ -192,12 +200,12 @@ class SyncRestServlet(RestServlet):
                "invite": invited,
                "leave": archived,
            },
            "device_one_time_keys_count": sync_result.device_one_time_keys_count,
            "next_batch": sync_result.next_batch.to_string(),
        }

        defer.returnValue((200, response_content))

    def encode_presence(self, events, time_now):
    @staticmethod
    def encode_presence(events, time_now):
        return {
            "events": [
                {

@@ -211,7 +219,8 @@ class SyncRestServlet(RestServlet):
            ]
        }

    def encode_joined(self, rooms, time_now, token_id, event_fields):
    @staticmethod
    def encode_joined(rooms, time_now, token_id, event_fields):
        """
        Encode the joined rooms in a sync result

@@ -230,13 +239,14 @@ class SyncRestServlet(RestServlet):
        """
        joined = {}
        for room in rooms:
            joined[room.room_id] = self.encode_room(
            joined[room.room_id] = SyncRestServlet.encode_room(
                room, time_now, token_id, only_fields=event_fields
            )

        return joined

    def encode_invited(self, rooms, time_now, token_id):
    @staticmethod
    def encode_invited(rooms, time_now, token_id):
        """
        Encode the invited rooms in a sync result

@@ -269,7 +279,8 @@ class SyncRestServlet(RestServlet):

        return invited

    def encode_archived(self, rooms, time_now, token_id, event_fields):
    @staticmethod
    def encode_archived(rooms, time_now, token_id, event_fields):
        """
        Encode the archived rooms in a sync result

@@ -288,7 +299,7 @@ class SyncRestServlet(RestServlet):
        """
        joined = {}
        for room in rooms:
            joined[room.room_id] = self.encode_room(
            joined[room.room_id] = SyncRestServlet.encode_room(
                room, time_now, token_id, joined=False, only_fields=event_fields
            )


synapse/rest/client/v2_alpha/user_directory.py (new file, 79 lines)
@@ -0,0 +1,79 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from ._base import client_v2_patterns

logger = logging.getLogger(__name__)


class UserDirectorySearchRestServlet(RestServlet):
    PATTERNS = client_v2_patterns("/user_directory/search$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(UserDirectorySearchRestServlet, self).__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.user_directory_handler = hs.get_user_directory_handler()

    @defer.inlineCallbacks
    def on_POST(self, request):
        """Searches for users in directory

        Returns:
            dict of the form::

                {
                    "limited": <bool>,  # whether there were more results or not
                    "results": [  # Ordered by best match first
                        {
                            "user_id": <user_id>,
                            "display_name": <display_name>,
                            "avatar_url": <avatar_url>
                        }
                    ]
                }
        """
        requester = yield self.auth.get_user_by_req(request, allow_guest=False)
        user_id = requester.user.to_string()

        body = parse_json_object_from_request(request)

        limit = body.get("limit", 10)
        limit = min(limit, 50)

        try:
            search_term = body["search_term"]
        except KeyError:
            raise SynapseError(400, "`search_term` is a required field")

        results = yield self.user_directory_handler.search_users(
            user_id, search_term, limit,
        )

        defer.returnValue((200, results))


def register_servlets(hs, http_server):
    UserDirectorySearchRestServlet(hs).register(http_server)
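As a sanity check on the new servlet, here is a minimal sketch of a client call; the homeserver URL, access token, and the `unstable` URL prefix are illustrative assumptions, not anything fixed by this diff:

    import requests

    ACCESS_TOKEN = "..."  # hypothetical token for a registered (non-guest) user

    resp = requests.post(
        # client_v2_patterns() registers the path under the client API prefixes;
        # the exact prefix depends on how the server registers it.
        "https://example.org/_matrix/client/unstable/user_directory/search",
        headers={"Authorization": "Bearer %s" % ACCESS_TOKEN},
        json={"search_term": "alice", "limit": 10},  # limit is capped at 50 server-side
    )
    resp.raise_for_status()
    body = resp.json()
    for result in body["results"]:  # ordered by best match first
        print(result["user_id"], result.get("display_name"))
    print("limited:", body["limited"])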
@@ -66,14 +66,19 @@ class DownloadResource(Resource):
    @defer.inlineCallbacks
    def _respond_local_file(self, request, media_id, name):
        media_info = yield self.store.get_local_media(media_id)
        if not media_info:
        if not media_info or media_info["quarantined_by"]:
            respond_404(request)
            return

        media_type = media_info["media_type"]
        media_length = media_info["media_length"]
        upload_name = name if name else media_info["upload_name"]
        file_path = self.filepaths.local_media_filepath(media_id)
        if media_info["url_cache"]:
            # TODO: Check the file still exists, if it doesn't we can redownload
            # it from the url `media_info["url_cache"]`
            file_path = self.filepaths.url_cache_filepath(media_id)
        else:
            file_path = self.filepaths.local_media_filepath(media_id)

        yield respond_with_file(
            request, media_type, file_path, media_length,

@@ -14,6 +14,9 @@
# limitations under the License.

import os
import re

NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")


class MediaFilePaths(object):

@@ -71,3 +74,107 @@ class MediaFilePaths(object):
            self.base_path, "remote_thumbnail", server_name,
            file_id[0:2], file_id[2:4], file_id[4:],
        )

    def url_cache_filepath(self, media_id):
        if NEW_FORMAT_ID_RE.match(media_id):
            # Media id is of the form <DATE><RANDOM_STRING>
            # E.g.: 2017-09-28-fsdRDt24DS234dsf
            return os.path.join(
                self.base_path, "url_cache",
                media_id[:10], media_id[11:]
            )
        else:
            return os.path.join(
                self.base_path, "url_cache",
                media_id[0:2], media_id[2:4], media_id[4:],
            )

    def url_cache_filepath_dirs_to_delete(self, media_id):
        "The dirs to try and remove if we delete the media_id file"
        if NEW_FORMAT_ID_RE.match(media_id):
            return [
                os.path.join(
                    self.base_path, "url_cache",
                    media_id[:10],
                ),
            ]
        else:
            return [
                os.path.join(
                    self.base_path, "url_cache",
                    media_id[0:2], media_id[2:4],
                ),
                os.path.join(
                    self.base_path, "url_cache",
                    media_id[0:2],
                ),
            ]

    def url_cache_thumbnail(self, media_id, width, height, content_type,
                            method):
        # Media id is of the form <DATE><RANDOM_STRING>
        # E.g.: 2017-09-28-fsdRDt24DS234dsf

        top_level_type, sub_type = content_type.split("/")
        file_name = "%i-%i-%s-%s-%s" % (
            width, height, top_level_type, sub_type, method
        )

        if NEW_FORMAT_ID_RE.match(media_id):
            return os.path.join(
                self.base_path, "url_cache_thumbnails",
                media_id[:10], media_id[11:],
                file_name
            )
        else:
            return os.path.join(
                self.base_path, "url_cache_thumbnails",
                media_id[0:2], media_id[2:4], media_id[4:],
                file_name
            )

    def url_cache_thumbnail_directory(self, media_id):
        # Media id is of the form <DATE><RANDOM_STRING>
        # E.g.: 2017-09-28-fsdRDt24DS234dsf

        if NEW_FORMAT_ID_RE.match(media_id):
            return os.path.join(
                self.base_path, "url_cache_thumbnails",
                media_id[:10], media_id[11:],
            )
        else:
            return os.path.join(
                self.base_path, "url_cache_thumbnails",
                media_id[0:2], media_id[2:4], media_id[4:],
            )

    def url_cache_thumbnail_dirs_to_delete(self, media_id):
        "The dirs to try and remove if we delete the media_id thumbnails"
        # Media id is of the form <DATE><RANDOM_STRING>
        # E.g.: 2017-09-28-fsdRDt24DS234dsf
        if NEW_FORMAT_ID_RE.match(media_id):
            return [
                os.path.join(
                    self.base_path, "url_cache_thumbnails",
                    media_id[:10], media_id[11:],
                ),
                os.path.join(
                    self.base_path, "url_cache_thumbnails",
                    media_id[:10],
                ),
            ]
        else:
            return [
                os.path.join(
                    self.base_path, "url_cache_thumbnails",
                    media_id[0:2], media_id[2:4], media_id[4:],
                ),
                os.path.join(
                    self.base_path, "url_cache_thumbnails",
                    media_id[0:2], media_id[2:4],
                ),
                os.path.join(
                    self.base_path, "url_cache_thumbnails",
                    media_id[0:2],
                ),
            ]
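To see what the two layouts produce, here is a self-contained sketch of `url_cache_filepath`; the base path and media ids are made up:

    import os
    import re

    NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")

    def url_cache_filepath(base_path, media_id):
        if NEW_FORMAT_ID_RE.match(media_id):
            # New-style ids are <DATE><RANDOM_STRING>: shard on the 10-char date,
            # skipping the separator character at index 10.
            return os.path.join(base_path, "url_cache", media_id[:10], media_id[11:])
        # Old-style random ids: shard on two 2-character prefixes.
        return os.path.join(
            base_path, "url_cache", media_id[0:2], media_id[2:4], media_id[4:],
        )

    print(url_cache_filepath("/data", "2017-09-28_fsdRDt24DS234dsf"))
    # /data/url_cache/2017-09-28/fsdRDt24DS234dsf
    print(url_cache_filepath("/data", "GerZNDnDZVjsOtar"))
    # /data/url_cache/Ge/rZ/NDnDZVjsOtar

Dating the shard directory is what makes the expiry job below cheap: a whole day of cached previews can be unlinked and its directory removed in one pass.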
@@ -135,6 +135,8 @@ class MediaRepository(object):
            media_info = yield self._download_remote_file(
                server_name, media_id
            )
        elif media_info["quarantined_by"]:
            raise NotFoundError()
        else:
            self.recently_accessed_remotes.add((server_name, media_id))
            yield self.store.update_cached_last_access_time(

@@ -184,6 +186,7 @@ class MediaRepository(object):
            raise
        except NotRetryingDestination:
            logger.warn("Not retrying destination %r", server_name)
            raise SynapseError(502, "Failed to fetch remote media")
        except Exception:
            logger.exception("Failed to fetch remote media %s/%s",
                             server_name, media_id)

@@ -323,13 +326,17 @@ class MediaRepository(object):
        defer.returnValue(t_path)

    @defer.inlineCallbacks
    def _generate_local_thumbnails(self, media_id, media_info):
    def _generate_local_thumbnails(self, media_id, media_info, url_cache=False):
        media_type = media_info["media_type"]
        requirements = self._get_thumbnail_requirements(media_type)
        if not requirements:
            return

        input_path = self.filepaths.local_media_filepath(media_id)
        if url_cache:
            input_path = self.filepaths.url_cache_filepath(media_id)
        else:
            input_path = self.filepaths.local_media_filepath(media_id)

        thumbnailer = Thumbnailer(input_path)
        m_width = thumbnailer.width
        m_height = thumbnailer.height

@@ -357,9 +364,14 @@ class MediaRepository(object):

        for t_width, t_height, t_type in scales:
            t_method = "scale"
            t_path = self.filepaths.local_media_thumbnail(
                media_id, t_width, t_height, t_type, t_method
            )
            if url_cache:
                t_path = self.filepaths.url_cache_thumbnail(
                    media_id, t_width, t_height, t_type, t_method
                )
            else:
                t_path = self.filepaths.local_media_thumbnail(
                    media_id, t_width, t_height, t_type, t_method
                )
            self._makedirs(t_path)
            t_len = thumbnailer.scale(t_path, t_width, t_height, t_type)

@@ -374,9 +386,14 @@ class MediaRepository(object):
                # thumbnail.
                continue
            t_method = "crop"
            t_path = self.filepaths.local_media_thumbnail(
                media_id, t_width, t_height, t_type, t_method
            )
            if url_cache:
                t_path = self.filepaths.url_cache_thumbnail(
                    media_id, t_width, t_height, t_type, t_method
                )
            else:
                t_path = self.filepaths.local_media_thumbnail(
                    media_id, t_width, t_height, t_type, t_method
                )
            self._makedirs(t_path)
            t_len = thumbnailer.crop(t_path, t_width, t_height, t_type)
            local_thumbnails.append((

@@ -36,6 +36,9 @@ import cgi
import ujson as json
import urlparse
import itertools
import datetime
import errno
import shutil

import logging
logger = logging.getLogger(__name__)

@@ -70,6 +73,10 @@ class PreviewUrlResource(Resource):

        self.downloads = {}

        self._cleaner_loop = self.clock.looping_call(
            self._expire_url_cache_data, 10 * 1000
        )

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

@@ -130,7 +137,7 @@ class PreviewUrlResource(Resource):
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["expires_ts"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(

@@ -164,7 +171,7 @@ class PreviewUrlResource(Resource):

        if _is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
                media_info['filesystem_id'], media_info, url_cache=True,
            )

            og = {

@@ -210,7 +217,7 @@ class PreviewUrlResource(Resource):
            if _is_media(image_info['media_type']):
                # TODO: make sure we don't choke on white-on-transparent images
                dims = yield self.media_repo._generate_local_thumbnails(
                    image_info['filesystem_id'], image_info
                    image_info['filesystem_id'], image_info, url_cache=True,
                )
                if dims:
                    og["og:image:width"] = dims['width']

@@ -239,7 +246,7 @@ class PreviewUrlResource(Resource):
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            media_info["expires"] + media_info["created_ts"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],

@@ -253,10 +260,9 @@ class PreviewUrlResource(Resource):
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        # XXX: horrible duplication with base_resource's _download_remote_file()
        file_id = random_string(24)
        file_id = datetime.date.today().isoformat() + '_' + random_string(16)

        fname = self.filepaths.local_media_filepath(file_id)
        fname = self.filepaths.url_cache_filepath(file_id)
        self.media_repo._makedirs(fname)

        try:

@@ -303,6 +309,7 @@ class PreviewUrlResource(Resource):
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:

@@ -327,6 +334,88 @@ class PreviewUrlResource(Resource):
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })

    @defer.inlineCallbacks
    def _expire_url_cache_data(self):
        """Clean up expired url cache content, media and thumbnails.
        """
        now = self.clock.time_msec()

        # First we delete expired url cache entries
        media_ids = yield self.store.get_expired_url_cache(now)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except OSError:
                pass

        yield self.store.delete_url_cache(removed_media)

        if removed_media:
            logger.info("Deleted %d entries from url cache", len(removed_media))

        # Now we delete old images associated with the url cache.
        # These may be cached for a bit on the client (i.e., they
        # may have a room open with a preview url thing open).
        # So we wait a couple of days before deleting, just in case.
        expire_before = now - 2 * 24 * 60 * 60 * 1000
        media_ids = yield self.store.get_url_cache_media_before(expire_before)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except OSError:
                pass

            thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
            try:
                shutil.rmtree(thumbnail_dir)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except OSError:
                pass

        yield self.store.delete_url_cache_media(removed_media)

        if removed_media:
            logger.info("Deleted %d media from url cache", len(removed_media))
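The cleaner wiring above boils down to a periodic job on the reactor. A reduced, runnable sketch (interval and job body are illustrative):

    from twisted.internet import reactor, task

    def expire_url_cache_data():
        # Pass 1: delete entries whose recorded expiry has passed.
        # Pass 2: delete media/thumbnails older than a ~2 day grace period, so
        # clients that still have a preview open don't lose the image at once.
        print("url cache expiry pass")

    loop = task.LoopingCall(expire_url_cache_data)
    loop.start(10.0)  # seconds; synapse's clock.looping_call takes milliseconds
    reactor.callLater(35, reactor.stop)
    reactor.run()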

def decode_and_calc_og(body, media_uri, request_encoding=None):
    from lxml import etree

@@ -434,6 +523,8 @@ def _calc_og(tree, media_uri):
            for el in _iterate_over_text(tree.find("body"), *TAGS_TO_REMOVE)
        )
        og['og:description'] = summarize_paragraphs(text_nodes)
    else:
        og['og:description'] = summarize_paragraphs([og['og:description']])

    # TODO: delete the url downloads to stop diskfilling,
    # as we only ever cared about its OG

@@ -81,7 +81,7 @@ class ThumbnailResource(Resource):
                               method, m_type):
        media_info = yield self.store.get_local_media(media_id)

        if not media_info:
        if not media_info or media_info["quarantined_by"]:
            respond_404(request)
            return

@@ -101,9 +101,16 @@ class ThumbnailResource(Resource):
            t_type = thumbnail_info["thumbnail_type"]
            t_method = thumbnail_info["thumbnail_method"]

            file_path = self.filepaths.local_media_thumbnail(
                media_id, t_width, t_height, t_type, t_method,
            )
            if media_info["url_cache"]:
                # TODO: Check the file still exists, if it doesn't we can redownload
                # it from the url `media_info["url_cache"]`
                file_path = self.filepaths.url_cache_thumbnail(
                    media_id, t_width, t_height, t_type, t_method,
                )
            else:
                file_path = self.filepaths.local_media_thumbnail(
                    media_id, t_width, t_height, t_type, t_method,
                )
            yield respond_with_file(request, t_type, file_path)

        else:

@@ -117,7 +124,7 @@ class ThumbnailResource(Resource):
                                        desired_type):
        media_info = yield self.store.get_local_media(media_id)

        if not media_info:
        if not media_info or media_info["quarantined_by"]:
            respond_404(request)
            return

@@ -134,9 +141,18 @@ class ThumbnailResource(Resource):
            t_type = info["thumbnail_type"] == desired_type

            if t_w and t_h and t_method and t_type:
                file_path = self.filepaths.local_media_thumbnail(
                    media_id, desired_width, desired_height, desired_type, desired_method,
                )
                if media_info["url_cache"]:
                    # TODO: Check the file still exists, if it doesn't we can redownload
                    # it from the url `media_info["url_cache"]`
                    file_path = self.filepaths.url_cache_thumbnail(
                        media_id, desired_width, desired_height, desired_type,
                        desired_method,
                    )
                else:
                    file_path = self.filepaths.local_media_thumbnail(
                        media_id, desired_width, desired_height, desired_type,
                        desired_method,
                    )
                yield respond_with_file(request, desired_type, file_path)
                return


@@ -31,6 +31,7 @@ from synapse.appservice.api import ApplicationServiceApi
from synapse.appservice.scheduler import ApplicationServiceScheduler
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
from synapse.events.spamcheck import SpamChecker
from synapse.federation import initialize_http_replication
from synapse.federation.send_queue import FederationRemoteSendQueue
from synapse.federation.transport.client import TransportLayerClient

@@ -49,9 +50,11 @@ from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.receipts import ReceiptsHandler
from synapse.handlers.read_marker import ReadMarkerHandler
from synapse.handlers.user_directory import UserDirectoyHandler
from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.notifier import Notifier
from synapse.push.action_generator import ActionGenerator
from synapse.push.pusherpool import PusherPool
from synapse.rest.media.v1.media_repository import MediaRepository
from synapse.state import StateHandler

@@ -135,6 +138,9 @@ class HomeServer(object):
        'macaroon_generator',
        'tcp_replication',
        'read_marker_handler',
        'action_generator',
        'user_directory_handler',
        'spam_checker',
    ]

    def __init__(self, hostname, **kwargs):

@@ -299,6 +305,15 @@ class HomeServer(object):
    def build_tcp_replication(self):
        raise NotImplementedError()

    def build_action_generator(self):
        return ActionGenerator(self)

    def build_user_directory_handler(self):
        return UserDirectoyHandler(self)

    def build_spam_checker(self):
        return SpamChecker(self)

    def remove_pusher(self, app_id, push_key, user_id):
        return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)


@@ -24,13 +24,13 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError
from synapse.events.snapshot import EventContext
from synapse.util.async import Linearizer
from synapse.util.caches import CACHE_SIZE_FACTOR

from collections import namedtuple
from frozendict import frozendict

import logging
import hashlib
import os

logger = logging.getLogger(__name__)

@@ -38,9 +38,6 @@ logger = logging.getLogger(__name__)
KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))


CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))


SIZE_OF_CACHE = int(100000 * CACHE_SIZE_FACTOR)
EVICTION_TIMEOUT_SECONDS = 60 * 60

@@ -170,9 +167,7 @@ class StateHandler(object):
        latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
        logger.debug("calling resolve_state_groups from get_current_user_in_room")
        entry = yield self.resolve_state_groups(room_id, latest_event_ids)
        joined_users = yield self.store.get_joined_users_from_state(
            room_id, entry.state_id, entry.state
        )
        joined_users = yield self.store.get_joined_users_from_state(room_id, entry)
        defer.returnValue(joined_users)

    @defer.inlineCallbacks

@@ -181,9 +176,7 @@ class StateHandler(object):
        latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
        logger.debug("calling resolve_state_groups from get_current_hosts_in_room")
        entry = yield self.resolve_state_groups(room_id, latest_event_ids)
        joined_hosts = yield self.store.get_joined_hosts(
            room_id, entry.state_id, entry.state
        )
        joined_hosts = yield self.store.get_joined_hosts(room_id, entry)
        defer.returnValue(joined_hosts)

    @defer.inlineCallbacks

@@ -195,12 +188,12 @@ class StateHandler(object):
        Returns:
            synapse.events.snapshot.EventContext:
        """
        context = EventContext()

        if event.internal_metadata.is_outlier():
            # If this is an outlier, then we know it shouldn't have any current
            # state. Certainly store.get_current_state won't return any, and
            # persisting the event won't store the state group.
            context = EventContext()
            if old_state:
                context.prev_state_ids = {
                    (s.type, s.state_key): s.event_id for s in old_state

@@ -219,6 +212,7 @@ class StateHandler(object):
            defer.returnValue(context)

        if old_state:
            context = EventContext()
            context.prev_state_ids = {
                (s.type, s.state_key): s.event_id for s in old_state
            }

@@ -239,19 +233,13 @@ class StateHandler(object):
            defer.returnValue(context)

        logger.debug("calling resolve_state_groups from compute_event_context")
        if event.is_state():
            entry = yield self.resolve_state_groups(
                event.room_id, [e for e, _ in event.prev_events],
                event_type=event.type,
                state_key=event.state_key,
            )
        else:
            entry = yield self.resolve_state_groups(
                event.room_id, [e for e, _ in event.prev_events],
            )
        entry = yield self.resolve_state_groups(
            event.room_id, [e for e, _ in event.prev_events],
        )

        curr_state = entry.state

        context = EventContext()
        context.prev_state_ids = curr_state
        if event.is_state():
            context.state_group = self.store.get_next_state_group()

@@ -264,10 +252,14 @@ class StateHandler(object):
            context.current_state_ids = dict(context.prev_state_ids)
            context.current_state_ids[key] = event.event_id

            context.prev_group = entry.prev_group
            context.delta_ids = entry.delta_ids
            if context.delta_ids is not None:
                context.delta_ids = dict(context.delta_ids)
            if entry.state_group:
                context.prev_group = entry.state_group
                context.delta_ids = {
                    key: event.event_id
                }
            elif entry.prev_group:
                context.prev_group = entry.prev_group
                context.delta_ids = dict(entry.delta_ids)
                context.delta_ids[key] = event.event_id
        else:
            if entry.state_group is None:

@@ -284,7 +276,7 @@ class StateHandler(object):

    @defer.inlineCallbacks
    @log_function
    def resolve_state_groups(self, room_id, event_ids, event_type=None, state_key=""):
    def resolve_state_groups(self, room_id, event_ids):
        """ Given a list of event_ids this method fetches the state at each
        event, resolves conflicts between them and returns them.

@@ -309,11 +301,13 @@ class StateHandler(object):
        if len(group_names) == 1:
            name, state_list = state_groups_ids.items().pop()

            prev_group, delta_ids = yield self.store.get_state_group_delta(name)

            defer.returnValue(_StateCacheEntry(
                state=state_list,
                state_group=name,
                prev_group=name,
                delta_ids={},
                prev_group=prev_group,
                delta_ids=delta_ids,
            ))

        with (yield self.resolve_linearizer.queue(group_names)):

@@ -357,20 +351,18 @@ class StateHandler(object):
            if new_state_event_ids == frozenset(e_id for e_id in events):
                state_group = sg
                break
        if state_group is None:
            # Worker instances don't have access to this method, but we want
            # to set the state_group on the main instance to increase cache
            # hits.
            if hasattr(self.store, "get_next_state_group"):
                state_group = self.store.get_next_state_group()

        # TODO: We want to create a state group for this set of events, to
        # increase cache hits, but we need to make sure that it doesn't
        # end up as a prev_group without being added to the database

        prev_group = None
        delta_ids = None
        for old_group, old_ids in state_groups_ids.items():
            if not set(new_state.iterkeys()) - set(old_ids.iterkeys()):
        for old_group, old_ids in state_groups_ids.iteritems():
            if not set(new_state) - set(old_ids):
                n_delta_ids = {
                    k: v
                    for k, v in new_state.items()
                    for k, v in new_state.iteritems()
                    if old_ids.get(k) != v
                }
                if not delta_ids or len(n_delta_ids) < len(delta_ids):

@@ -49,6 +49,7 @@ from .tags import TagsStore
from .account_data import AccountDataStore
from .openid import OpenIdStore
from .client_ips import ClientIpStore
from .user_directory import UserDirectoryStore

from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
from .engines import PostgresEngine

@@ -86,6 +87,7 @@ class DataStore(RoomMemberStore, RoomStore,
                ClientIpStore,
                DeviceStore,
                DeviceInboxStore,
                UserDirectoryStore,
                ):

    def __init__(self, db_conn, hs):

@@ -221,17 +223,30 @@ class DataStore(RoomMemberStore, RoomStore,
            "DeviceListFederationStreamChangeCache", device_list_max,
        )

        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
            db_conn, "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream id with events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )

        cur = LoggingTransaction(
            db_conn.cursor(),
            name="_find_stream_orderings_for_times_txn",
            database_engine=self.database_engine,
            after_callbacks=[]
            after_callbacks=[],
            final_callbacks=[],
        )
        self._find_stream_orderings_for_times_txn(cur)
        cur.close()

        self.find_stream_orderings_looping_call = self._clock.looping_call(
            self._find_stream_orderings_for_times, 60 * 60 * 1000
            self._find_stream_orderings_for_times, 10 * 60 * 1000
        )

        self._stream_order_on_start = self.get_room_max_stream_ordering()

@@ -272,31 +287,23 @@ class DataStore(RoomMemberStore, RoomStore,
        Counts the number of users who used this homeserver in the last 24 hours.
        """
        def _count_users(txn):
            txn.execute(
                "SELECT COUNT(DISTINCT user_id) AS users"
                " FROM user_ips"
                " WHERE last_seen > ?",
                # This is close enough to a day for our purposes.
                (int(self._clock.time_msec()) - (1000 * 60 * 60 * 24),)
            )
            rows = self.cursor_to_dict(txn)
            if rows:
                return rows[0]["users"]
            return 0
            yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)

            sql = """
                SELECT COALESCE(count(*), 0) FROM (
                    SELECT user_id FROM user_ips
                    WHERE last_seen > ?
                    GROUP BY user_id
                ) u
            """

            txn.execute(sql, (yesterday,))
            count, = txn.fetchone()
            return count

        ret = yield self.runInteraction("count_users", _count_users)
        defer.returnValue(ret)
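The rewritten count can be exercised standalone; a sketch against sqlite with a toy schema (names mirror the diff, data is made up):

    import sqlite3
    import time

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE user_ips (user_id TEXT, last_seen BIGINT)")
    now = int(time.time() * 1000)
    conn.executemany(
        "INSERT INTO user_ips VALUES (?, ?)",
        [("@a:hs", now), ("@a:hs", now - 5000), ("@b:hs", now - 48 * 3600 * 1000)],
    )

    yesterday = now - 1000 * 60 * 60 * 24
    count, = conn.execute(
        """
        SELECT COALESCE(count(*), 0) FROM (
            SELECT user_id FROM user_ips WHERE last_seen > ? GROUP BY user_id
        ) u
        """,
        (yesterday,),
    ).fetchone()
    print(count)  # 1 -- only @a:hs was seen in the last 24 hours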

    def get_user_ip_and_agents(self, user):
        return self._simple_select_list(
            table="user_ips",
            keyvalues={"user_id": user.to_string()},
            retcols=[
                "access_token", "ip", "user_agent", "last_seen"
            ],
            desc="get_user_ip_and_agents",
        )

    def get_users(self):
        """Function to retrieve a list of users in users table.


@@ -16,6 +16,7 @@ import logging

from synapse.api.errors import StoreError
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.caches.dictionary_cache import DictionaryCache
from synapse.util.caches.descriptors import Cache
from synapse.storage.engines import PostgresEngine

@@ -27,10 +28,6 @@ from twisted.internet import defer
import sys
import time
import threading
import os


CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))


logger = logging.getLogger(__name__)

@@ -52,13 +49,17 @@ class LoggingTransaction(object):
    """An object that almost-transparently proxies for the 'txn' object
    passed to the constructor. Adds logging and metrics to the .execute()
    method."""
    __slots__ = ["txn", "name", "database_engine", "after_callbacks"]
    __slots__ = [
        "txn", "name", "database_engine", "after_callbacks", "final_callbacks",
    ]

    def __init__(self, txn, name, database_engine, after_callbacks):
    def __init__(self, txn, name, database_engine, after_callbacks,
                 final_callbacks):
        object.__setattr__(self, "txn", txn)
        object.__setattr__(self, "name", name)
        object.__setattr__(self, "database_engine", database_engine)
        object.__setattr__(self, "after_callbacks", after_callbacks)
        object.__setattr__(self, "final_callbacks", final_callbacks)

    def call_after(self, callback, *args, **kwargs):
        """Call the given callback on the main twisted thread after the

@@ -67,6 +68,9 @@ class LoggingTransaction(object):
        """
        self.after_callbacks.append((callback, args, kwargs))

    def call_finally(self, callback, *args, **kwargs):
        self.final_callbacks.append((callback, args, kwargs))

    def __getattr__(self, name):
        return getattr(self.txn, name)

@@ -217,8 +221,8 @@ class SQLBaseStore(object):

        self._clock.looping_call(loop, 10000)

    def _new_transaction(self, conn, desc, after_callbacks, logging_context,
                         func, *args, **kwargs):
    def _new_transaction(self, conn, desc, after_callbacks, final_callbacks,
                         logging_context, func, *args, **kwargs):
        start = time.time() * 1000
        txn_id = self._TXN_ID

@@ -237,7 +241,8 @@ class SQLBaseStore(object):
            try:
                txn = conn.cursor()
                txn = LoggingTransaction(
                    txn, name, self.database_engine, after_callbacks
                    txn, name, self.database_engine, after_callbacks,
                    final_callbacks,
                )
                r = func(txn, *args, **kwargs)
                conn.commit()

@@ -298,6 +303,7 @@ class SQLBaseStore(object):
        start_time = time.time() * 1000

        after_callbacks = []
        final_callbacks = []

        def inner_func(conn, *args, **kwargs):
            with LoggingContext("runInteraction") as context:

@@ -309,7 +315,7 @@ class SQLBaseStore(object):

                current_context.copy_to(context)
                return self._new_transaction(
                    conn, desc, after_callbacks, current_context,
                    conn, desc, after_callbacks, final_callbacks, current_context,
                    func, *args, **kwargs
                )

@@ -318,9 +324,13 @@ class SQLBaseStore(object):
            result = yield self._db_pool.runWithConnection(
                inner_func, *args, **kwargs
            )
        finally:

            for after_callback, after_args, after_kwargs in after_callbacks:
                after_callback(*after_args, **after_kwargs)
        finally:
            for after_callback, after_args, after_kwargs in final_callbacks:
                after_callback(*after_args, **after_kwargs)

        defer.returnValue(result)
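The split between the two callback lists mirrors try/finally semantics: `after_callbacks` fire only if the transaction commits, while `final_callbacks` always fire. A minimal sketch of that contract (function names here are illustrative):

    def run_interaction(func):
        after_callbacks = []   # run only on success
        final_callbacks = []   # always run, even if func raises
        try:
            result = func(after_callbacks, final_callbacks)
            for cb, args, kwargs in after_callbacks:
                cb(*args, **kwargs)
            return result
        finally:
            for cb, args, kwargs in final_callbacks:
                cb(*args, **kwargs)

    def txn_body(after, final):
        after.append((print, ("cache invalidated",), {}))
        final.append((print, ("stream-id context exited",), {}))
        return 42

    print(run_interaction(txn_body))

This is why the stream-id generator's `ctx.__exit__` moves to `call_finally` later in the diff: the id must be released even when the transaction fails.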

    @defer.inlineCallbacks

@@ -425,6 +435,11 @@ class SQLBaseStore(object):

        txn.execute(sql, vals)

    def _simple_insert_many(self, table, values, desc):
        return self.runInteraction(
            desc, self._simple_insert_many_txn, table, values
        )

    @staticmethod
    def _simple_insert_many_txn(txn, table, values):
        if not values:

@@ -936,7 +951,7 @@ class SQLBaseStore(object):
        # __exit__ called after the transaction finishes.
        ctx = self._cache_id_gen.get_next()
        stream_id = ctx.__enter__()
        txn.call_after(ctx.__exit__, None, None, None)
        txn.call_finally(ctx.__exit__, None, None, None)
        txn.call_after(self.hs.get_notifier().on_new_replication_data)

        self._simple_insert_txn(

@@ -308,3 +308,16 @@ class AccountDataStore(SQLBaseStore):
            " WHERE stream_id < ?"
        )
        txn.execute(update_max_id_sql, (next_id, next_id))

    @cachedInlineCallbacks(num_args=2, cache_context=True, max_entries=5000)
    def is_ignored_by(self, ignored_user_id, ignorer_user_id, cache_context):
        ignored_account_data = yield self.get_global_account_data_by_type_for_user(
            "m.ignored_user_list", ignorer_user_id,
            on_invalidate=cache_context.invalidate,
        )
        if not ignored_account_data:
            defer.returnValue(False)

        defer.returnValue(
            ignored_user_id in ignored_account_data.get("ignored_users", {})
        )

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import simplejson as json
from twisted.internet import defer

@@ -26,6 +27,25 @@ from ._base import SQLBaseStore
logger = logging.getLogger(__name__)


def _make_exclusive_regex(services_cache):
    # We precompile a regex constructed from all the regexes that the ASes
    # have registered for exclusive users.
    exclusive_user_regexes = [
        regex.pattern
        for service in services_cache
        for regex in service.get_exlusive_user_regexes()
    ]
    if exclusive_user_regexes:
        exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes)
        exclusive_user_regex = re.compile(exclusive_user_regex)
    else:
        # We handle this case specially otherwise the constructed regex
        # will always match
        exclusive_user_regex = None

    return exclusive_user_regex
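The helper relies on the regex-union trick: OR the per-service patterns into one compiled regex so a single `match()` replaces a loop over services. A standalone sketch with made-up patterns:

    import re

    def make_exclusive_regex(patterns):
        if not patterns:
            # Joining zero patterns would compile to a regex that matches
            # everything, hence the special-cased None.
            return None
        return re.compile("|".join("(" + p + ")" for p in patterns))

    rx = make_exclusive_regex([r"@irc_.*:example\.org", r"@gitter_.*:example\.org"])
    print(bool(rx.match("@irc_alice:example.org")))  # True
    print(bool(rx.match("@bob:example.org")))        # False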


class ApplicationServiceStore(SQLBaseStore):

    def __init__(self, hs):

@@ -35,17 +55,18 @@ class ApplicationServiceStore(SQLBaseStore):
            hs.hostname,
            hs.config.app_service_config_files
        )
        self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)

    def get_app_services(self):
        return self.services_cache

    def get_if_app_services_interested_in_user(self, user_id):
        """Check if the user is one associated with an app service
        """Check if the user is one associated with an app service (exclusively)
        """
        for service in self.services_cache:
            if service.is_interested_in_user(user_id):
                return True
        return False
        if self.exclusive_user_regex:
            return bool(self.exclusive_user_regex.match(user_id))
        else:
            return False

    def get_app_service_by_user_id(self, user_id):
        """Retrieve an application service from their user ID.

@@ -15,11 +15,14 @@

import logging

from twisted.internet import defer
from twisted.internet import defer, reactor

from ._base import Cache
from . import background_updates

from synapse.util.caches import CACHE_SIZE_FACTOR


logger = logging.getLogger(__name__)

# Number of msec of granularity to store the user IP 'last seen' time. Smaller

@@ -33,7 +36,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
        self.client_ip_last_seen = Cache(
            name="client_ip_last_seen",
            keylen=4,
            max_entries=5000,
            max_entries=50000 * CACHE_SIZE_FACTOR,
        )

        super(ClientIpStore, self).__init__(hs)

@@ -45,10 +48,19 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
            columns=["user_id", "device_id", "last_seen"],
        )

    @defer.inlineCallbacks
    def insert_client_ip(self, user, access_token, ip, user_agent, device_id):
        now = int(self._clock.time_msec())
        key = (user.to_string(), access_token, ip)
        # (user_id, access_token, ip) -> (user_agent, device_id, last_seen)
        self._batch_row_update = {}

        self._client_ip_looper = self._clock.looping_call(
            self._update_client_ips_batch, 5 * 1000
        )
        reactor.addSystemEventTrigger("before", "shutdown", self._update_client_ips_batch)

    def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
                         now=None):
        if not now:
            now = int(self._clock.time_msec())
        key = (user_id, access_token, ip)

        try:
            last_seen = self.client_ip_last_seen.get(key)

@@ -57,34 +69,48 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):

        # Rate-limited inserts
        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
            defer.returnValue(None)
            return

        self.client_ip_last_seen.prefill(key, now)

        # It's safe not to lock here: a) no unique constraint,
        # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
        yield self._simple_upsert(
            "user_ips",
            keyvalues={
                "user_id": user.to_string(),
                "access_token": access_token,
                "ip": ip,
                "user_agent": user_agent,
                "device_id": device_id,
            },
            values={
                "last_seen": now,
            },
            desc="insert_client_ip",
            lock=False,
        self._batch_row_update[key] = (user_agent, device_id, now)

    def _update_client_ips_batch(self):
        to_update = self._batch_row_update
        self._batch_row_update = {}
        return self.runInteraction(
            "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
        )

    def _update_client_ips_batch_txn(self, txn, to_update):
        self.database_engine.lock_table(txn, "user_ips")

        for entry in to_update.iteritems():
            (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry

            self._simple_upsert_txn(
                txn,
                table="user_ips",
                keyvalues={
                    "user_id": user_id,
                    "access_token": access_token,
                    "ip": ip,
                    "user_agent": user_agent,
                    "device_id": device_id,
                },
                values={
                    "last_seen": last_seen,
                },
                lock=False,
            )
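The batching above follows a simple coalescing pattern: observations overwrite each other in a dict keyed by (user_id, access_token, ip), and a timer flushes the dict in one transaction. A reduced, runnable sketch:

    from twisted.internet import reactor, task

    batch = {}

    def observe(user_id, access_token, ip, user_agent, device_id, now):
        batch[(user_id, access_token, ip)] = (user_agent, device_id, now)

    def flush():
        global batch
        to_update, batch = batch, {}
        for key, value in to_update.items():
            print("upsert", key, "->", value)  # one write per distinct key

    observe("@a:hs", "tok", "1.2.3.4", "ua", "dev1", now=1)
    observe("@a:hs", "tok", "1.2.3.4", "ua", "dev1", now=2)  # coalesced
    loop = task.LoopingCall(flush)
    loop.start(5.0)
    reactor.callLater(6, reactor.stop)
    reactor.run()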

    @defer.inlineCallbacks
    def get_last_client_ip_by_device(self, devices):
    def get_last_client_ip_by_device(self, user_id, device_id):
        """For each device_id listed, give the user_ip it was last seen on

        Args:
            devices (iterable[(str, str)]): list of (user_id, device_id) pairs
            user_id (str)
            device_id (str): If None fetches all devices for the user

        Returns:
            defer.Deferred: resolves to a dict, where the keys

@@ -95,6 +121,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
        res = yield self.runInteraction(
            "get_last_client_ip_by_device",
            self._get_last_client_ip_by_device_txn,
            user_id, device_id,
            retcols=(
                "user_id",
                "access_token",

@@ -103,23 +130,34 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                "device_id",
                "last_seen",
            ),
            devices=devices
        )

        ret = {(d["user_id"], d["device_id"]): d for d in res}
        for key in self._batch_row_update:
            uid, access_token, ip = key
            if uid == user_id:
                user_agent, did, last_seen = self._batch_row_update[key]
                if not device_id or did == device_id:
                    ret[(user_id, device_id)] = {
                        "user_id": user_id,
                        "access_token": access_token,
                        "ip": ip,
                        "user_agent": user_agent,
                        "device_id": did,
                        "last_seen": last_seen,
                    }
        defer.returnValue(ret)

    @classmethod
    def _get_last_client_ip_by_device_txn(cls, txn, devices, retcols):
    def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols):
        where_clauses = []
        bindings = []
        for (user_id, device_id) in devices:
            if device_id is None:
                where_clauses.append("(user_id = ? AND device_id IS NULL)")
                bindings.extend((user_id, ))
            else:
                where_clauses.append("(user_id = ? AND device_id = ?)")
                bindings.extend((user_id, device_id))
        if device_id is None:
            where_clauses.append("user_id = ?")
            bindings.extend((user_id, ))
        else:
            where_clauses.append("(user_id = ? AND device_id = ?)")
            bindings.extend((user_id, device_id))

        if not where_clauses:
            return []

@@ -147,3 +185,37 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):

        txn.execute(sql, bindings)
        return cls.cursor_to_dict(txn)

    @defer.inlineCallbacks
    def get_user_ip_and_agents(self, user):
        user_id = user.to_string()
        results = {}

        for key in self._batch_row_update:
            uid, access_token, ip = key
            if uid == user_id:
                user_agent, _, last_seen = self._batch_row_update[key]
                results[(access_token, ip)] = (user_agent, last_seen)

        rows = yield self._simple_select_list(
            table="user_ips",
            keyvalues={"user_id": user_id},
            retcols=[
                "access_token", "ip", "user_agent", "last_seen"
            ],
            desc="get_user_ip_and_agents",
        )

        results.update(
            ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
            for row in rows
        )
        defer.returnValue(list(
            {
                "access_token": access_token,
                "ip": ip,
                "user_agent": user_agent,
                "last_seen": last_seen,
            }
            for (access_token, ip), (user_agent, last_seen) in results.iteritems()
        ))

@@ -368,7 +368,7 @@ class DeviceStore(SQLBaseStore):

        prev_sent_id_sql = """
            SELECT coalesce(max(stream_id), 0) as stream_id
            FROM device_lists_outbound_pokes
            FROM device_lists_outbound_last_success
            WHERE destination = ? AND user_id = ? AND stream_id <= ?
        """

@@ -510,32 +510,43 @@ class DeviceStore(SQLBaseStore):
        )

    def _mark_as_sent_devices_by_remote_txn(self, txn, destination, stream_id):
        # First we DELETE all rows such that only the latest row for each
        # (destination, user_id is left. We do this by selecting first and
        # deleting.
        # We update the device_lists_outbound_last_success with the successfully
        # poked users. We do the join to see which users need to be inserted and
        # which updated.
        sql = """
            SELECT user_id, coalesce(max(stream_id), 0) FROM device_lists_outbound_pokes
            WHERE destination = ? AND stream_id <= ?
            SELECT user_id, coalesce(max(o.stream_id), 0), (max(s.stream_id) IS NOT NULL)
            FROM device_lists_outbound_pokes as o
            LEFT JOIN device_lists_outbound_last_success as s
                USING (destination, user_id)
            WHERE destination = ? AND o.stream_id <= ?
            GROUP BY user_id
            HAVING count(*) > 1
        """
        txn.execute(sql, (destination, stream_id,))
        rows = txn.fetchall()

        sql = """
            DELETE FROM device_lists_outbound_pokes
            WHERE destination = ? AND user_id = ? AND stream_id < ?
            UPDATE device_lists_outbound_last_success
            SET stream_id = ?
            WHERE destination = ? AND user_id = ?
        """
        txn.executemany(
            sql, ((destination, row[0], row[1],) for row in rows)
            sql, ((row[1], destination, row[0],) for row in rows if row[2])
        )

        # Mark everything that is left as sent
        sql = """
            UPDATE device_lists_outbound_pokes SET sent = ?
            INSERT INTO device_lists_outbound_last_success
            (destination, user_id, stream_id) VALUES (?, ?, ?)
        """
        txn.executemany(
            sql, ((destination, row[0], row[1],) for row in rows if not row[2])
        )

        # Delete all sent outbound pokes
        sql = """
            DELETE FROM device_lists_outbound_pokes
            WHERE destination = ? AND stream_id <= ?
        """
        txn.execute(sql, (True, destination, stream_id,))
        txn.execute(sql, (destination, stream_id,))
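The UPDATE-or-INSERT split works because the LEFT JOIN yields a per-user flag saying whether a `last_success` row already exists. A sqlite sketch of the same three statements with toy data:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE pokes (destination TEXT, user_id TEXT, stream_id INT);
        CREATE TABLE last_success (destination TEXT, user_id TEXT, stream_id INT);
        INSERT INTO pokes VALUES ('remote', '@a:hs', 7), ('remote', '@b:hs', 9);
        INSERT INTO last_success VALUES ('remote', '@a:hs', 3);
    """)

    rows = conn.execute("""
        SELECT user_id, coalesce(max(o.stream_id), 0), (max(s.stream_id) IS NOT NULL)
        FROM pokes AS o
        LEFT JOIN last_success AS s USING (destination, user_id)
        WHERE destination = ? AND o.stream_id <= ?
        GROUP BY user_id
    """, ("remote", 10)).fetchall()

    conn.executemany(
        "UPDATE last_success SET stream_id = ? WHERE destination = ? AND user_id = ?",
        ((r[1], "remote", r[0]) for r in rows if r[2]),      # row existed: update
    )
    conn.executemany(
        "INSERT INTO last_success VALUES (?, ?, ?)",
        (("remote", r[0], r[1]) for r in rows if not r[2]),  # new user: insert
    )
    print(conn.execute("SELECT * FROM last_success ORDER BY user_id").fetchall())
    # [('remote', '@a:hs', 7), ('remote', '@b:hs', 9)]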
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_user_whose_devices_changed(self, from_key):
|
||||
@@ -670,6 +681,14 @@ class DeviceStore(SQLBaseStore):
|
||||
)
|
||||
)
|
||||
|
||||
# Since we've deleted unsent deltas, we need to remove the entry
|
||||
# of last successful sent so that the prev_ids are correctly set.
|
||||
sql = """
|
||||
DELETE FROM device_lists_outbound_last_success
|
||||
WHERE destination = ? AND user_id = ?
|
||||
"""
|
||||
txn.executemany(sql, ((row[0], row[1]) for row in rows))
|
||||
|
||||
logger.info("Pruned %d device list outbound pokes", txn.rowcount)
|
||||
|
||||
return self.runInteraction(
|
||||
|
||||
@@ -170,3 +170,17 @@ class DirectoryStore(SQLBaseStore):
|
||||
"room_alias",
|
||||
desc="get_aliases_for_room",
|
||||
)
|
||||
|
||||
def update_aliases_for_room(self, old_room_id, new_room_id, creator):
|
||||
def _update_aliases_for_room_txn(txn):
|
||||
sql = "UPDATE room_aliases SET room_id = ?, creator = ? WHERE room_id = ?"
|
||||
txn.execute(sql, (new_room_id, creator, old_room_id,))
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.get_aliases_for_room, (old_room_id,)
|
||||
)
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.get_aliases_for_room, (new_room_id,)
|
||||
)
|
||||
return self.runInteraction(
|
||||
"_update_aliases_for_room_txn", _update_aliases_for_room_txn
|
||||
)
|
||||
|
||||
@@ -185,8 +185,8 @@ class EndToEndKeyStore(SQLBaseStore):
|
||||
for algorithm, key_id, json_bytes in new_keys
|
||||
],
|
||||
)
|
||||
txn.call_after(
|
||||
self.count_e2e_one_time_keys.invalidate, (user_id, device_id,)
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.count_e2e_one_time_keys, (user_id, device_id,)
|
||||
)
|
||||
yield self.runInteraction(
|
||||
"add_e2e_one_time_keys_insert", _add_e2e_one_time_keys
|
||||
@@ -237,24 +237,29 @@ class EndToEndKeyStore(SQLBaseStore):
|
||||
)
|
||||
for user_id, device_id, algorithm, key_id in delete:
|
||||
txn.execute(sql, (user_id, device_id, algorithm, key_id))
|
||||
txn.call_after(
|
||||
self.count_e2e_one_time_keys.invalidate, (user_id, device_id,)
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.count_e2e_one_time_keys, (user_id, device_id,)
|
||||
)
|
||||
return result
|
||||
return self.runInteraction(
|
||||
"claim_e2e_one_time_keys", _claim_e2e_one_time_keys
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_e2e_keys_by_device(self, user_id, device_id):
|
||||
yield self._simple_delete(
|
||||
table="e2e_device_keys_json",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
desc="delete_e2e_device_keys_by_device"
|
||||
def delete_e2e_keys_by_device_txn(txn):
|
||||
self._simple_delete_txn(
|
||||
txn,
|
||||
table="e2e_device_keys_json",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
)
|
||||
self._simple_delete_txn(
|
||||
txn,
|
||||
table="e2e_one_time_keys_json",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
)
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.count_e2e_one_time_keys, (user_id, device_id,)
|
||||
)
|
||||
return self.runInteraction(
|
||||
"delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn
|
||||
)
|
||||
yield self._simple_delete(
|
||||
table="e2e_one_time_keys_json",
|
||||
keyvalues={"user_id": user_id, "device_id": device_id},
|
||||
desc="delete_e2e_one_time_keys_by_device"
|
||||
)
|
||||
self.count_e2e_one_time_keys.invalidate((user_id, device_id,))
|
||||
|
||||
@@ -37,25 +37,55 @@ class EventFederationStore(SQLBaseStore):
     and backfilling from another server respectively.
     """
 
+    EVENT_AUTH_STATE_ONLY = "event_auth_state_only"
+
     def __init__(self, hs):
         super(EventFederationStore, self).__init__(hs)
 
+        self.register_background_update_handler(
+            self.EVENT_AUTH_STATE_ONLY,
+            self._background_delete_non_state_event_auth,
+        )
+
         hs.get_clock().looping_call(
             self._delete_old_forward_extrem_cache, 60 * 60 * 1000
         )
 
-    def get_auth_chain(self, event_ids):
-        return self.get_auth_chain_ids(event_ids).addCallback(self._get_events)
+    def get_auth_chain(self, event_ids, include_given=False):
+        """Get auth events for given event_ids. The events *must* be state events.
+
+        Args:
+            event_ids (list): state events
+            include_given (bool): include the given events in result
+
+        Returns:
+            list of events
+        """
+        return self.get_auth_chain_ids(
+            event_ids, include_given=include_given,
+        ).addCallback(self._get_events)
 
-    def get_auth_chain_ids(self, event_ids):
+    def get_auth_chain_ids(self, event_ids, include_given=False):
+        """Get auth events for given event_ids. The events *must* be state events.
+
+        Args:
+            event_ids (list): state events
+            include_given (bool): include the given events in result
+
+        Returns:
+            list of event_ids
+        """
         return self.runInteraction(
             "get_auth_chain_ids",
             self._get_auth_chain_ids_txn,
-            event_ids
+            event_ids, include_given
         )
 
-    def _get_auth_chain_ids_txn(self, txn, event_ids):
-        results = set()
+    def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+        if include_given:
+            results = set(event_ids)
+        else:
+            results = set()
 
         base_sql = (
             "SELECT auth_id FROM event_auth WHERE event_id IN (%s)"
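The new include_given flag simply seeds the result set with the requested ids before the breadth-first walk over event_auth begins. A pure-Python sketch of the same traversal over an in-memory map (illustrative only; the real transaction batches each frontier into a single SELECT):

    # {event_id: [auth_event_id, ...]} stands in for the event_auth table.
    def get_auth_chain_ids(auth_map, event_ids, include_given=False):
        results = set(event_ids) if include_given else set()
        front = set(event_ids)
        while front:
            # One "SELECT auth_id ... WHERE event_id IN (front)" round trip.
            new_front = set()
            for event_id in front:
                new_front.update(auth_map.get(event_id, ()))
            new_front -= results          # don't revisit known events
            front = new_front
            results.update(front)
        return results

    auth_map = {"$c": ["$b"], "$b": ["$a"], "$a": []}
    assert get_auth_chain_ids(auth_map, ["$c"]) == {"$b", "$a"}
    assert get_auth_chain_ids(auth_map, ["$c"], include_given=True) == {"$a", "$b", "$c"}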
@@ -504,3 +534,52 @@ class EventFederationStore(SQLBaseStore):
 
         txn.execute(query, (room_id,))
         txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
+
+    @defer.inlineCallbacks
+    def _background_delete_non_state_event_auth(self, progress, batch_size):
+        def delete_event_auth(txn):
+            target_min_stream_id = progress.get("target_min_stream_id_inclusive")
+            max_stream_id = progress.get("max_stream_id_exclusive")
+
+            if not target_min_stream_id or not max_stream_id:
+                txn.execute("SELECT COALESCE(MIN(stream_ordering), 0) FROM events")
+                rows = txn.fetchall()
+                target_min_stream_id = rows[0][0]
+
+                txn.execute("SELECT COALESCE(MAX(stream_ordering), 0) FROM events")
+                rows = txn.fetchall()
+                max_stream_id = rows[0][0]
+
+            min_stream_id = max_stream_id - batch_size
+
+            sql = """
+                DELETE FROM event_auth
+                WHERE event_id IN (
+                    SELECT event_id FROM events
+                    LEFT JOIN state_events USING (room_id, event_id)
+                    WHERE ? <= stream_ordering AND stream_ordering < ?
+                        AND state_key IS null
+                )
+            """
+
+            txn.execute(sql, (min_stream_id, max_stream_id,))
+
+            new_progress = {
+                "target_min_stream_id_inclusive": target_min_stream_id,
+                "max_stream_id_exclusive": min_stream_id,
+            }
+
+            self._background_update_progress_txn(
+                txn, self.EVENT_AUTH_STATE_ONLY, new_progress
+            )
+
+            return min_stream_id >= target_min_stream_id
+
+        result = yield self.runInteraction(
+            self.EVENT_AUTH_STATE_ONLY, delete_event_auth
+        )
+
+        if not result:
+            yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY)
+
+        defer.returnValue(batch_size)
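The background update walks the events table backwards through stream_ordering in windows of batch_size, persisting its progress between runs so it can resume after a restart, and signalling "more to do" by returning True. A toy driver showing the same windowing contract (hypothetical helper, no database):

    # Each run covers [min_id, max_id) and stores min_id as the next run's
    # exclusive upper bound, mirroring the hunk above.
    def run_background_update(progress, batch_size, do_batch):
        target = progress.get("target_min_stream_id_inclusive", 0)
        max_id = progress["max_stream_id_exclusive"]
        min_id = max_id - batch_size

        do_batch(min_id, max_id)  # e.g. the DELETE FROM event_auth query

        progress["max_stream_id_exclusive"] = min_id
        # True means "keep going", as in `min_stream_id >= target_min_stream_id`.
        return min_id >= target

    progress = {"target_min_stream_id_inclusive": 0, "max_stream_id_exclusive": 250}
    windows = []
    while run_background_update(progress, 100, lambda lo, hi: windows.append((lo, hi))):
        pass
    assert windows == [(150, 250), (50, 150), (-50, 50)]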
@@ -38,7 +38,6 @@ from functools import wraps
 import synapse.metrics
 
 import logging
-import math
 import ujson as json
 
 # these are only included to make the type annotations work
@@ -404,6 +403,11 @@ class EventsStore(SQLBaseStore):
                 (room_id, ), new_state
             )
 
+        for room_id, latest_event_ids in new_forward_extremeties.iteritems():
+            self.get_latest_event_ids_in_room.prefill(
+                (room_id,), list(latest_event_ids)
+            )
+
     @defer.inlineCallbacks
     def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids):
         """Calculates the new forward extremeties for a room given events to
@@ -648,9 +652,10 @@ class EventsStore(SQLBaseStore):
             list of the event ids which are the forward extremities.
 
         """
-        self._update_current_state_txn(txn, current_state_for_room)
+        max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering
+
+        self._update_current_state_txn(txn, current_state_for_room, max_stream_order)
 
         self._update_forward_extremities_txn(
             txn,
             new_forward_extremities=new_forward_extremeties,
@@ -713,7 +718,7 @@ class EventsStore(SQLBaseStore):
             backfilled=backfilled,
         )
 
-    def _update_current_state_txn(self, txn, state_delta_by_room):
+    def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order):
         for room_id, current_state_tuple in state_delta_by_room.iteritems():
             to_delete, to_insert, _ = current_state_tuple
             txn.executemany(
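The new loop seeds get_latest_event_ids_in_room via prefill rather than invalidating it: the writer already knows the new forward extremities, so it can populate the cache directly and spare the next reader a database round trip. A sketch with a toy cache (not Synapse's cache class):

    class Cache(object):
        def __init__(self):
            self._data = {}
        def prefill(self, key, value):
            self._data[key] = value      # write-through: no DB fetch later
        def invalidate(self, key):
            self._data.pop(key, None)    # alternative: next read hits the DB
        def get(self, key):
            return self._data.get(key)

    cache = Cache()
    new_forward_extremeties = {"!room:hs": {"$ev1", "$ev2"}}
    for room_id, latest_event_ids in new_forward_extremeties.items():
        cache.prefill((room_id,), list(latest_event_ids))

    assert sorted(cache.get(("!room:hs",))) == ["$ev1", "$ev2"]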
@@ -735,6 +740,29 @@ class EventsStore(SQLBaseStore):
                 ],
             )
 
+            state_deltas = {key: None for key in to_delete}
+            state_deltas.update(to_insert)
+
+            self._simple_insert_many_txn(
+                txn,
+                table="current_state_delta_stream",
+                values=[
+                    {
+                        "stream_id": max_stream_order,
+                        "room_id": room_id,
+                        "type": key[0],
+                        "state_key": key[1],
+                        "event_id": ev_id,
+                        "prev_event_id": to_delete.get(key, None),
+                    }
+                    for key, ev_id in state_deltas.iteritems()
+                ]
+            )
+
+            self._curr_state_delta_stream_cache.entity_has_changed(
+                room_id, max_stream_order,
+            )
+
             # Invalidate the various caches
 
             # Figure out the changes of membership to invalidate the
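Merging to_delete and to_insert into state_deltas yields exactly one row per changed (type, state_key): pure deletions keep event_id None, and prev_event_id records whatever was replaced. A self-contained sketch of the row construction (dict literals stand in for the real state maps):

    def build_state_delta_rows(room_id, stream_id, to_delete, to_insert):
        state_deltas = {key: None for key in to_delete}  # deleted -> event_id None
        state_deltas.update(to_insert)                   # inserted/replaced -> new id
        return [
            {
                "stream_id": stream_id,
                "room_id": room_id,
                "type": key[0],
                "state_key": key[1],
                "event_id": ev_id,
                "prev_event_id": to_delete.get(key),
            }
            for key, ev_id in state_deltas.items()
        ]

    rows = build_state_delta_rows(
        "!room:hs", 42,
        to_delete={("m.room.member", "@alice:hs"): "$old"},
        to_insert={("m.room.member", "@alice:hs"): "$new"},
    )
    assert rows == [{
        "stream_id": 42, "room_id": "!room:hs", "type": "m.room.member",
        "state_key": "@alice:hs", "event_id": "$new", "prev_event_id": "$old",
    }]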
@@ -743,11 +771,7 @@ class EventsStore(SQLBaseStore):
             # and which we have added, then we invlidate the caches for all
             # those users.
             members_changed = set(
-                state_key for ev_type, state_key in to_delete.iterkeys()
-                if ev_type == EventTypes.Member
-            )
-            members_changed.update(
-                state_key for ev_type, state_key in to_insert.iterkeys()
+                state_key for ev_type, state_key in state_deltas
                 if ev_type == EventTypes.Member
             )
@@ -756,6 +780,11 @@ class EventsStore(SQLBaseStore):
                 txn, self.get_rooms_for_user, (member,)
             )
 
+            for host in set(get_domain_from_id(u) for u in members_changed):
+                self._invalidate_cache_and_stream(
+                    txn, self.is_host_joined, (room_id, host)
+                )
+
             self._invalidate_cache_and_stream(
                 txn, self.get_users_in_room, (room_id,)
             )
@@ -1120,6 +1149,7 @@ class EventsStore(SQLBaseStore):
                 }
                 for event, _ in events_and_contexts
                 for auth_id, _ in event.auth_events
+                if event.is_state()
             ],
         )
 
@@ -1419,7 +1449,7 @@ class EventsStore(SQLBaseStore):
         ]
 
         rows = self._new_transaction(
-            conn, "do_fetch", [], None, self._fetch_event_rows, event_ids
+            conn, "do_fetch", [], [], None, self._fetch_event_rows, event_ids
         )
 
         row_dict = {
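Because state_deltas is a dict keyed by (type, state_key), iterating it directly yields those pairs, so a single comprehension now covers both the deleted and the inserted membership entries (EventTypes.Member is the literal "m.room.member"):

    state_deltas = {
        ("m.room.member", "@alice:hs"): None,    # left / deleted
        ("m.room.member", "@bob:hs"): "$join",   # joined / inserted
        ("m.room.topic", ""): "$topic",          # non-membership change
    }
    members_changed = set(
        state_key for ev_type, state_key in state_deltas
        if ev_type == "m.room.member"
    )
    assert members_changed == {"@alice:hs", "@bob:hs"}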
@@ -1599,68 +1629,54 @@ class EventsStore(SQLBaseStore):
         call to this function, it will return None.
         """
         def _count_messages(txn):
-            now = self.hs.get_clock().time()
-
-            txn.execute(
-                "SELECT reported_stream_token, reported_time FROM stats_reporting"
-            )
-            last_reported = self.cursor_to_dict(txn)
-
-            txn.execute(
-                "SELECT stream_ordering"
-                " FROM events"
-                " ORDER BY stream_ordering DESC"
-                " LIMIT 1"
-            )
-            now_reporting = self.cursor_to_dict(txn)
-            if not now_reporting:
-                logger.info("Calculating daily messages skipped; no now_reporting")
-                return None
-            now_reporting = now_reporting[0]["stream_ordering"]
-
-            txn.execute("DELETE FROM stats_reporting")
-            txn.execute(
-                "INSERT INTO stats_reporting"
-                " (reported_stream_token, reported_time)"
-                " VALUES (?, ?)",
-                (now_reporting, now,)
-            )
-
-            if not last_reported:
-                logger.info("Calculating daily messages skipped; no last_reported")
-                return None
-
-            # Close enough to correct for our purposes.
-            yesterday = (now - 24 * 60 * 60)
-            since_yesterday_seconds = yesterday - last_reported[0]["reported_time"]
-            any_since_yesterday = math.fabs(since_yesterday_seconds) > 60 * 60
-            if any_since_yesterday:
-                logger.info(
-                    "Calculating daily messages skipped; since_yesterday_seconds: %d" %
-                    (since_yesterday_seconds,)
-                )
-                return None
-
-            txn.execute(
-                "SELECT COUNT(*) as messages"
-                " FROM events NATURAL JOIN event_json"
-                " WHERE json like '%m.room.message%'"
-                " AND stream_ordering > ?"
-                " AND stream_ordering <= ?",
-                (
-                    last_reported[0]["reported_stream_token"],
-                    now_reporting,
-                )
-            )
-            rows = self.cursor_to_dict(txn)
-            if not rows:
-                logger.info("Calculating daily messages skipped; messages count missing")
-                return None
-            return rows[0]["messages"]
+            sql = """
+                SELECT COALESCE(COUNT(*), 0) FROM events
+                WHERE type = 'm.room.message'
+                AND stream_ordering > ?
+            """
+            txn.execute(sql, (self.stream_ordering_day_ago,))
+            count, = txn.fetchone()
+            return count
 
         ret = yield self.runInteraction("count_messages", _count_messages)
         defer.returnValue(ret)
 
+    @defer.inlineCallbacks
+    def count_daily_sent_messages(self):
+        def _count_messages(txn):
+            # This is good enough as if you have silly characters in your own
+            # hostname then thats your own fault.
+            like_clause = "%:" + self.hs.hostname
+
+            sql = """
+                SELECT COALESCE(COUNT(*), 0) FROM events
+                WHERE type = 'm.room.message'
+                AND sender LIKE ?
+                AND stream_ordering > ?
+            """
+
+            txn.execute(sql, (like_clause, self.stream_ordering_day_ago,))
+            count, = txn.fetchone()
+            return count
+
+        ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
+        defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def count_daily_active_rooms(self):
+        def _count(txn):
+            sql = """
+                SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events
+                WHERE type = 'm.room.message'
+                AND stream_ordering > ?
+            """
+            txn.execute(sql, (self.stream_ordering_day_ago,))
+            count, = txn.fetchone()
+            return count
+
+        ret = yield self.runInteraction("count_daily_active_rooms", _count)
+        defer.returnValue(ret)
+
     @defer.inlineCallbacks
     def _background_reindex_fields_sender(self, progress, batch_size):
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
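The rewritten counters drop the stats_reporting bookkeeping entirely and share one shape: count events of type m.room.message with stream_ordering above a precomputed one-day-ago watermark (self.stream_ordering_day_ago). A runnable sqlite3 demonstration of the three query shapes against a stand-in events table (the watermark value is made up):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE events (stream_ordering INTEGER, type TEXT, sender TEXT, room_id TEXT)"
    )
    conn.executemany(
        "INSERT INTO events VALUES (?, ?, ?, ?)",
        [
            (1, "m.room.message", "@old:hs", "!a:hs"),   # before the watermark
            (5, "m.room.message", "@alice:hs", "!a:hs"),
            (6, "m.room.message", "@bob:other", "!b:hs"),
            (7, "m.room.member", "@alice:hs", "!a:hs"),  # not a message
        ],
    )

    stream_ordering_day_ago = 2  # first stream position of "today"

    (count,) = conn.execute(
        "SELECT COALESCE(COUNT(*), 0) FROM events"
        " WHERE type = 'm.room.message' AND stream_ordering > ?",
        (stream_ordering_day_ago,),
    ).fetchone()
    assert count == 2  # daily messages

    (sent,) = conn.execute(
        "SELECT COALESCE(COUNT(*), 0) FROM events"
        " WHERE type = 'm.room.message' AND sender LIKE ? AND stream_ordering > ?",
        ("%:hs", stream_ordering_day_ago),
    ).fetchone()
    assert sent == 1  # daily messages sent by local users (hostname "hs")

    (rooms,) = conn.execute(
        "SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events"
        " WHERE type = 'm.room.message' AND stream_ordering > ?",
        (stream_ordering_day_ago,),
    ).fetchone()
    assert rooms == 2  # daily active rooms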
@@ -2258,6 +2274,24 @@ class EventsStore(SQLBaseStore):
 
         defer.returnValue((int(res["topological_ordering"]), int(res["stream_ordering"])))
 
+    def get_max_current_state_delta_stream_id(self):
+        return self._stream_id_gen.get_current_token()
+
+    def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
+        def get_all_updated_current_state_deltas_txn(txn):
+            sql = """
+                SELECT stream_id, room_id, type, state_key, event_id
+                FROM current_state_delta_stream
+                WHERE ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC LIMIT ?
+            """
+            txn.execute(sql, (from_token, to_token, limit))
+            return txn.fetchall()
+        return self.runInteraction(
+            "get_all_updated_current_state_deltas",
+            get_all_updated_current_state_deltas_txn,
+        )
+
 
 AllNewEventsResult = namedtuple("AllNewEventsResult", [
     "new_forward_events", "new_backfill_events",
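get_all_updated_current_state_deltas reads a half-open (from_token, to_token] window ordered by stream_id with a LIMIT, which is the usual paging contract for replication consumers: advance from_token to the last stream_id seen and ask again. A sketch with a hypothetical fetch function standing in for the store method:

    def page_deltas(fetch, from_token, to_token, limit=100):
        while from_token < to_token:
            rows = fetch(from_token, to_token, limit)
            if not rows:
                break
            for row in rows:
                yield row
            from_token = rows[-1][0]  # stream_id of the last row seen

    # Toy backing data: (stream_id, room_id, type, state_key, event_id)
    deltas = [(i, "!r:hs", "m.room.member", "@u%d:hs" % i, "$e%d" % i)
              for i in range(1, 8)]

    def fetch(frm, to, limit):
        return [d for d in deltas if frm < d[0] <= to][:limit]

    assert [d[0] for d in page_deltas(fetch, 0, 7, limit=3)] == [1, 2, 3, 4, 5, 6, 7]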
@@ -19,6 +19,7 @@ from ._base import SQLBaseStore
 from synapse.api.errors import SynapseError, Codes
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
+from canonicaljson import encode_canonical_json
 import simplejson as json
 
 
@@ -46,11 +47,20 @@ class FilteringStore(SQLBaseStore):
         defer.returnValue(json.loads(str(def_json).decode("utf-8")))
 
     def add_user_filter(self, user_localpart, user_filter):
-        def_json = json.dumps(user_filter).encode("utf-8")
+        def_json = encode_canonical_json(user_filter)
 
         # Need an atomic transaction to SELECT the maximal ID so far then
         # INSERT a new one
         def _do_txn(txn):
+            sql = (
+                "SELECT filter_id FROM user_filters "
+                "WHERE user_id = ? AND filter_json = ?"
+            )
+            txn.execute(sql, (user_localpart, def_json))
+            filter_id_response = txn.fetchone()
+            if filter_id_response is not None:
+                return filter_id_response[0]
+
            sql = (
                 "SELECT MAX(filter_id) FROM user_filters "
                 "WHERE user_id = ?"
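The dedupe added to add_user_filter only works if equal filters always serialise to identical bytes; that is what encode_canonical_json guarantees (sorted keys, fixed separators), whereas plain json.dumps preserves whatever key order the dict happened to have:

    from canonicaljson import encode_canonical_json

    a = {"room": {"timeline": {"limit": 10}}, "presence": {"types": []}}
    b = {"presence": {"types": []}, "room": {"timeline": {"limit": 10}}}

    # Same filter, different insertion order -- byte-identical encoding,
    # so the "WHERE user_id = ? AND filter_json = ?" lookup finds the
    # earlier copy instead of allocating a new filter_id.
    assert encode_canonical_json(a) == encode_canonical_json(b)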
Some files were not shown because too many files have changed in this diff.