Compare commits
250 Commits
neilj/cont...travis/nul
| Author | SHA1 | Date |
|---|---|---|
| | 177f2b838c | |
| | f9d7d3aa89 | |
| | 1a94de60e8 | |
| | 73f1de31d1 | |
| | 3d5bba581b | |
| | 006bd8f4f6 | |
| | c31e375ade | |
| | 62388a1e44 | |
| | ae5521be9c | |
| | 8031a6f3d5 | |
| | 66b75e2d81 | |
| | 2dfbeea66f | |
| | b898a5600a | |
| | e26e6b3230 | |
| | 4a30e4acb4 | |
| | f3ff64e000 | |
| | f4c80d70f8 | |
| | 9526aa96a6 | |
| | 9259cd4bee | |
| | 8aed6d87ff | |
| | 959550b645 | |
| | 44b8ba484e | |
| | 17f6804837 | |
| | c4aef549ad | |
| | bab3eddac4 | |
| | 6a5a70edf0 | |
| | 384122efa8 | |
| | 04d53794d6 | |
| | 5ceee46c6b | |
| | 0620dd49db | |
| | c7ec06e8a6 | |
| | 24b93b9c76 | |
| | 5206648a4a | |
| | edef6d29ae | |
| | d642178654 | |
| | 1dff859d6a | |
| | 57ba3451b6 | |
| | 06671057b6 | |
| | 9ad246e6d2 | |
| | 2ac9c965dd | |
| | 935af0da38 | |
| | 210cb6dae2 | |
| | 3787133c9e | |
| | 99c4ec1eef | |
| | ad5b4074e1 | |
| | b63cc325a9 | |
| | d4ca533d70 | |
| | 291e1eea5e | |
| | 85ece3df46 | |
| | 8dd9cca8ea | |
| | 5dbff34509 | |
| | ce5bcefc60 | |
| | afb463fb7a | |
| | da5ef0bb42 | |
| | 7ce1f97a13 | |
| | fdeac1e984 | |
| | f89f688a55 | |
| | 07cff7b121 | |
| | d46aab3fa8 | |
| | 5c39d262c0 | |
| | 895179a4dc | |
| | 8f9ce1a8a2 | |
| | cc8c139a39 | |
| | a5fe16c5a7 | |
| | efdc55db75 | |
| | 54a582ed44 | |
| | cd32375846 | |
| | 7a7eba8302 | |
| | 2c662ddde4 | |
| | 95f3fcda3c | |
| | 4a6d5de98c | |
| | fafb936de5 | |
| | b5c62c6b26 | |
| | 33453419b0 | |
| | a0603523d2 | |
| | f201a30244 | |
| | cd0faba7cd | |
| | f1e5b41388 | |
| | 5f027a315f | |
| | 5be34fc3e3 | |
| | e6459c26b4 | |
| | 1757e2d7c3 | |
| | 13018bb997 | |
| | 4a926f528e | |
| | 5fb72e6888 | |
| | b50641e357 | |
| | efe3c7977a | |
| | a9fc71c372 | |
| | 7155162844 | |
| | 54d77107c1 | |
| | 0aba6c8251 | |
| | d94544051b | |
| | 99c7dae087 | |
| | 8ed2f182f7 | |
| | 52ddc6c0ed | |
| | efefb5bda2 | |
| | 6ca88c4693 | |
| | daa2fb6317 | |
| | 495e859e58 | |
| | db3046f565 | |
| | dc4f6d1b01 | |
| | ae69a6aa9d | |
| | 53788a447f | |
| | 4fb44fb5b9 | |
| | a80e6b53f9 | |
| | b54b03f9e1 | |
| | df2ebd75d3 | |
| | 5a4b328f52 | |
| | 2e1129b5f7 | |
| | 822072b1bb | |
| | 516a5fb64b | |
| | 8cc9ba3522 | |
| | 9e99143c47 | |
| | 8782bfb783 | |
| | 2725cd2290 | |
| | 1a536699fd | |
| | bb93757b32 | |
| | 9a18e1d832 | |
| | 2f48c4e1ae | |
| | c9f811c5d4 | |
| | 04299132af | |
| | 7a3eb8657d | |
| | 9c61dce3c8 | |
| | a18f93279e | |
| | 8714ff6d51 | |
| | c2bb7476c9 | |
| | 085ae346ac | |
| | a78996cc4a | |
| | cd3f30014a | |
| | ee90c06e38 | |
| | b36c82576e | |
| | d9a02d1201 | |
| | ea41c740ee | |
| | 84cebb89cc | |
| | 130f932cbc | |
| | 11ea16777f | |
| | d216a36b37 | |
| | c0e0740bef | |
| | c8c069db92 | |
| | 1473058b5e | |
| | de655e669a | |
| | 59e2d2694d | |
| | 3fdff14207 | |
| | 4804206dbe | |
| | 836d3adcce | |
| | 9b86d3dee6 | |
| | fa21455e08 | |
| | 0b5cf95607 | |
| | dd76e5ca62 | |
| | 5485852b43 | |
| | ecc0967315 | |
| | e3281d7d26 | |
| | f73f18fe7b | |
| | 863ec09622 | |
| | a845abbf3a | |
| | 1565ebec2c | |
| | 1acfb9e9f0 | |
| | 1a7104fde3 | |
| | 60c3635f05 | |
| | 247dc1bd0b | |
| | 176f31c2e3 | |
| | 12f9d51e82 | |
| | c193b39134 | |
| | 84196cb231 | |
| | 0836cbb9f5 | |
| | 1df2f80367 | |
| | f203c98794 | |
| | cc4bd762df | |
| | 03ad6bd483 | |
| | 40e576e29c | |
| | 8e9ca83537 | |
| | 579b637b6c | |
| | c1799b0f85 | |
| | 6aad81ec0c | |
| | 803a28fd1d | |
| | 031919dafb | |
| | d8e357b7cf | |
| | 8c5b1e30d4 | |
| | b31cc1c613 | |
| | d6118c5be6 | |
| | 19f0722b2c | |
| | 28a81ed62f | |
| | 63b75cf7d7 | |
| | bd0d45ca69 | |
| | 0962d3cdff | |
| | 837d7f85a9 | |
| | fd8fb32bdd | |
| | 7ca638c761 | |
| | ce6d47934b | |
| | 2ebf7d56fa | |
| | e86d74d748 | |
| | afe560b072 | |
| | 00714e5102 | |
| | 4a9a118a94 | |
| | 6824ddd93d | |
| | 788163e204 | |
| | 6b2b9a58c4 | |
| | 3d26eae14a | |
| | 1a63c7c281 | |
| | f8826d31cd | |
| | f30a882cc6 | |
| | 95c603ae6f | |
| | 8383a553a6 | |
| | ca90336a69 | |
| | eaf41a943b | |
| | 91934025b9 | |
| | 20f0617e87 | |
| | 49ff74da9b | |
| | 600ec04739 | |
| | fd2fcb817c | |
| | a1eb4c6d2f | |
| | 14d5ad7d2b | |
| | ad010f6306 | |
| | 3f22e993f0 | |
| | a137f4eac0 | |
| | 468b2bcb2e | |
| | 38642614cf | |
| | 6e27a8620f | |
| | ec638a1602 | |
| | 208251956d | |
| | d5adf297e6 | |
| | 6b0ddf8ee5 | |
| | caa76e6021 | |
| | e446921def | |
| | 329688c161 | |
| | 02491e009d | |
| | a0fc256d65 | |
| | bfc8fdf1fc | |
| | 747aa9f8ca | |
| | 5e45b558b0 | |
| | 50d2a3059d | |
| | 644b86677f | |
| | 4abf5aa81a | |
| | 1f1e8dd8ec | |
| | 18b69be00f | |
| | 0084309cd2 | |
| | f50efcb65d | |
| | f88a9e6323 | |
| | 3352baac4b | |
| | b25e387c0d | |
| | 67d7b44784 | |
| | 2d951686a7 | |
| | 7d2a0c848e | |
| | 7fc1e17f4c | |
| | b78aac5582 | |
| | 6ae9361510 | |
| | ef27d434d1 | |
| | b2d574f126 | |
| | 30805237fa | |
| | b43d9a920b | |
158 CHANGES.md
@@ -1,3 +1,161 @@
Synapse 0.99.5.1 (2019-05-22)
=============================

No significant changes.


Synapse 0.99.5 (2019-05-22)
===========================

No significant changes.


Synapse 0.99.5rc1 (2019-05-21)
==============================

Features
--------

- Add ability to blacklist IP ranges for the federation client. ([\#5043](https://github.com/matrix-org/synapse/issues/5043))
- Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. ([\#5181](https://github.com/matrix-org/synapse/issues/5181))
- Drop support for the undocumented /_matrix/client/v2_alpha API prefix. ([\#5190](https://github.com/matrix-org/synapse/issues/5190))
- Add an option to disable per-room profiles. ([\#5196](https://github.com/matrix-org/synapse/issues/5196))
- Stick an expiration date to any registered user missing one at startup if account validity is enabled. ([\#5204](https://github.com/matrix-org/synapse/issues/5204))
- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211), [\#5203](https://github.com/matrix-org/synapse/issues/5203), [\#5212](https://github.com/matrix-org/synapse/issues/5212))
- Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). ([\#5210](https://github.com/matrix-org/synapse/issues/5210), [\#5217](https://github.com/matrix-org/synapse/issues/5217))


Bugfixes
--------

- Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. ([\#5039](https://github.com/matrix-org/synapse/issues/5039))
- Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. ([\#5146](https://github.com/matrix-org/synapse/issues/5146))
- Re-order stages in registration flows such that msisdn and email verification are done last. ([\#5174](https://github.com/matrix-org/synapse/issues/5174))
- Fix 3pid guest invites. ([\#5177](https://github.com/matrix-org/synapse/issues/5177))
- Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. ([\#5187](https://github.com/matrix-org/synapse/issues/5187))
- Prevent registration for user ids that are too long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198))
- Fix incompatibility between ACME support and Python 3.5.2. ([\#5218](https://github.com/matrix-org/synapse/issues/5218))
- Fix error handling for rooms whose versions are unknown. ([\#5219](https://github.com/matrix-org/synapse/issues/5219))


Internal Changes
----------------

- Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs. ([\#3484](https://github.com/matrix-org/synapse/issues/3484))
- Update tests to consistently be configured via the same code that is used when loading from configuration files. ([\#5171](https://github.com/matrix-org/synapse/issues/5171), [\#5185](https://github.com/matrix-org/synapse/issues/5185))
- Allow client event serialization to be async. ([\#5183](https://github.com/matrix-org/synapse/issues/5183))
- Expose DataStore._get_events as get_events_as_list. ([\#5184](https://github.com/matrix-org/synapse/issues/5184))
- Make generating SQL bounds for pagination generic. ([\#5191](https://github.com/matrix-org/synapse/issues/5191))
- Stop telling people to install the optional dependencies by default. ([\#5197](https://github.com/matrix-org/synapse/issues/5197))


Synapse 0.99.4 (2019-05-15)
===========================

No significant changes.


Synapse 0.99.4rc1 (2019-05-13)
==============================

Features
--------

- Add systemd-python to the optional dependencies to enable logging to the systemd journal. Install with `pip install matrix-synapse[systemd]`. ([\#4339](https://github.com/matrix-org/synapse/issues/4339))
- Add a default .m.rule.tombstone push rule. ([\#4867](https://github.com/matrix-org/synapse/issues/4867))
- Add ability for password provider modules to bind email addresses to users upon registration. ([\#4947](https://github.com/matrix-org/synapse/issues/4947))
- Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. ([\#4967](https://github.com/matrix-org/synapse/issues/4967))
- Remove presence list support as per MSC 1819. ([\#4989](https://github.com/matrix-org/synapse/issues/4989))
- Reduce CPU usage starting pushers during start up. ([\#4991](https://github.com/matrix-org/synapse/issues/4991))
- Add a delete group admin API. ([\#5002](https://github.com/matrix-org/synapse/issues/5002))
- Add config option to block users from looking up 3PIDs. ([\#5010](https://github.com/matrix-org/synapse/issues/5010))
- Add context to phonehome stats. ([\#5020](https://github.com/matrix-org/synapse/issues/5020))
- Configure the example systemd units to have a log identifier of `matrix-synapse`
  instead of the executable name, `python`.
  Contributed by Christoph Müller. ([\#5023](https://github.com/matrix-org/synapse/issues/5023))
- Add time-based account expiration. ([\#5027](https://github.com/matrix-org/synapse/issues/5027), [\#5047](https://github.com/matrix-org/synapse/issues/5047), [\#5073](https://github.com/matrix-org/synapse/issues/5073), [\#5116](https://github.com/matrix-org/synapse/issues/5116))
- Add support for handling `/versions`, `/voip` and `/push_rules` client endpoints to client_reader worker. ([\#5063](https://github.com/matrix-org/synapse/issues/5063), [\#5065](https://github.com/matrix-org/synapse/issues/5065), [\#5070](https://github.com/matrix-org/synapse/issues/5070))
- Add a configuration option to require authentication on /publicRooms and /profile endpoints. ([\#5083](https://github.com/matrix-org/synapse/issues/5083))
- Move admin APIs to `/_synapse/admin/v1`. (The old paths are retained for backwards-compatibility, for now). ([\#5119](https://github.com/matrix-org/synapse/issues/5119))
- Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. ([\#5121](https://github.com/matrix-org/synapse/issues/5121), [\#5142](https://github.com/matrix-org/synapse/issues/5142))


Bugfixes
--------

- Avoid redundant URL encoding of redirect URL for SSO login in the fallback login page. Fixes a regression introduced in [#4220](https://github.com/matrix-org/synapse/pull/4220). Contributed by Marcel Fabian Krüger ("[zaugin](https://github.com/zauguin)"). ([\#4555](https://github.com/matrix-org/synapse/issues/4555))
- Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. ([\#4942](https://github.com/matrix-org/synapse/issues/4942), [\#5103](https://github.com/matrix-org/synapse/issues/5103))
- Fix sync bug which made accepting invites unreliable in worker-mode synapses. ([\#4955](https://github.com/matrix-org/synapse/issues/4955), [\#4956](https://github.com/matrix-org/synapse/issues/4956))
- start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. ([\#4981](https://github.com/matrix-org/synapse/issues/4981))
- Transfer related groups on room upgrade. ([\#4990](https://github.com/matrix-org/synapse/issues/4990))
- Prevent the ability to kick users from a room they aren't in. ([\#4999](https://github.com/matrix-org/synapse/issues/4999))
- Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud <anders@jensenwaud.com>. ([\#5003](https://github.com/matrix-org/synapse/issues/5003))
- Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. ([\#5009](https://github.com/matrix-org/synapse/issues/5009))
- Fix "cannot import name execute_batch" error with postgres. ([\#5032](https://github.com/matrix-org/synapse/issues/5032))
- Fix disappearing exceptions in manhole. ([\#5035](https://github.com/matrix-org/synapse/issues/5035))
- Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. ([\#5037](https://github.com/matrix-org/synapse/issues/5037))
- Make sure we're not registering the same 3pid twice on registration. ([\#5071](https://github.com/matrix-org/synapse/issues/5071))
- Don't crash on lack of expiry templates. ([\#5077](https://github.com/matrix-org/synapse/issues/5077))
- Fix the ratelimiting on third party invites. ([\#5104](https://github.com/matrix-org/synapse/issues/5104))
- Add some missing limitations to room alias creation. ([\#5124](https://github.com/matrix-org/synapse/issues/5124), [\#5128](https://github.com/matrix-org/synapse/issues/5128))
- Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! ([\#5138](https://github.com/matrix-org/synapse/issues/5138))


Internal Changes
----------------

- Add test to verify threepid auth check added in #4435. ([\#4474](https://github.com/matrix-org/synapse/issues/4474))
- Fix/improve some docstrings in the replication code. ([\#4949](https://github.com/matrix-org/synapse/issues/4949))
- Split synapse.replication.tcp.streams into smaller files. ([\#4953](https://github.com/matrix-org/synapse/issues/4953))
- Refactor replication row generation/parsing. ([\#4954](https://github.com/matrix-org/synapse/issues/4954))
- Run `black` to clean up formatting on `synapse/storage/roommember.py` and `synapse/storage/events.py`. ([\#4959](https://github.com/matrix-org/synapse/issues/4959))
- Remove log line for password via the admin API. ([\#4965](https://github.com/matrix-org/synapse/issues/4965))
- Fix typo in TLS filenames in docker/README.md. Also add the '-p' commandline option to the 'docker run' example. Contributed by Jurrie Overgoor. ([\#4968](https://github.com/matrix-org/synapse/issues/4968))
- Refactor room version definitions. ([\#4969](https://github.com/matrix-org/synapse/issues/4969))
- Reduce log level of .well-known/matrix/client responses. ([\#4972](https://github.com/matrix-org/synapse/issues/4972))
- Add `config.signing_key_path` that can be read by `synapse.config` utility. ([\#4974](https://github.com/matrix-org/synapse/issues/4974))
- Track which identity server is used when binding a threepid and use that for unbinding, as per MSC1915. ([\#4982](https://github.com/matrix-org/synapse/issues/4982))
- Rewrite KeyringTestCase as a HomeserverTestCase. ([\#4985](https://github.com/matrix-org/synapse/issues/4985))
- README updates: Corrected the default POSTGRES_USER. Added port forwarding hint in TLS section. ([\#4987](https://github.com/matrix-org/synapse/issues/4987))
- Remove a number of unused tables from the database schema. ([\#4992](https://github.com/matrix-org/synapse/issues/4992), [\#5028](https://github.com/matrix-org/synapse/issues/5028), [\#5033](https://github.com/matrix-org/synapse/issues/5033))
- Run `black` on the remainder of `synapse/storage/`. ([\#4996](https://github.com/matrix-org/synapse/issues/4996))
- Fix grammar in get_current_users_in_room and give it a docstring. ([\#4998](https://github.com/matrix-org/synapse/issues/4998))
- Clean up some code in the server-key Keyring. ([\#5001](https://github.com/matrix-org/synapse/issues/5001))
- Convert SYNAPSE_NO_TLS Docker variable to boolean for user friendliness. Contributed by Gabriel Eckerson. ([\#5005](https://github.com/matrix-org/synapse/issues/5005))
- Refactor synapse.storage._base._simple_select_list_paginate. ([\#5007](https://github.com/matrix-org/synapse/issues/5007))
- Store the notary server name correctly in server_keys_json. ([\#5024](https://github.com/matrix-org/synapse/issues/5024))
- Rewrite Datastore.get_server_verify_keys to reduce the number of database transactions. ([\#5030](https://github.com/matrix-org/synapse/issues/5030))
- Remove extraneous period from copyright headers. ([\#5046](https://github.com/matrix-org/synapse/issues/5046))
- Update documentation for where to get Synapse packages. ([\#5067](https://github.com/matrix-org/synapse/issues/5067))
- Add workarounds for pep-517 install errors. ([\#5098](https://github.com/matrix-org/synapse/issues/5098))
- Improve logging when event-signature checks fail. ([\#5100](https://github.com/matrix-org/synapse/issues/5100))
- Factor out an "assert_requester_is_admin" function. ([\#5120](https://github.com/matrix-org/synapse/issues/5120))
- Remove the requirement to authenticate for /admin/server_version. ([\#5122](https://github.com/matrix-org/synapse/issues/5122))
- Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. ([\#5155](https://github.com/matrix-org/synapse/issues/5155))
- Run `black` on the tests directory. ([\#5170](https://github.com/matrix-org/synapse/issues/5170))
- Fix CI after new release of isort. ([\#5179](https://github.com/matrix-org/synapse/issues/5179))
- Fix bogus imports in unit tests. ([\#5154](https://github.com/matrix-org/synapse/issues/5154))


Synapse 0.99.3.2 (2019-05-03)
=============================

Internal Changes
----------------

- Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. ([\#5135](https://github.com/matrix-org/synapse/issues/5135))


Synapse 0.99.3.1 (2019-05-03)
=============================

Security update
---------------

This release includes two security fixes:

- Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! ([\#5133](https://github.com/matrix-org/synapse/issues/5133))
- Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! ([\#5134](https://github.com/matrix-org/synapse/issues/5134))

Synapse 0.99.3 (2019-04-01)
===========================
25 INSTALL.md
@@ -35,7 +35,7 @@ virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
-pip install matrix-synapse[all]
+pip install matrix-synapse
```

This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)

@@ -48,7 +48,7 @@ update flag:

```
source ~/synapse/env/bin/activate
-pip install -U matrix-synapse[all]
+pip install -U matrix-synapse
```

Before you can start Synapse, you will need to generate a configuration

@@ -257,18 +257,29 @@ https://github.com/spantaleev/matrix-docker-ansible-deploy
#### Matrix.org packages

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
-Synapse via https://matrix.org/packages/debian/. To use them:
+Synapse via https://packages.matrix.org/debian/. They are available for Debian
+9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:

```
-sudo apt install -y lsb-release curl apt-transport-https
-echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
+sudo apt install -y lsb-release wget apt-transport-https
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
    sudo tee /etc/apt/sources.list.d/matrix-org.list
-curl "https://matrix.org/packages/debian/repo-key.asc" |
-    sudo apt-key add -
sudo apt update
sudo apt install matrix-synapse-py3
```

+**Note**: if you followed a previous version of these instructions which
+recommended using `apt-key add` to add an old key from
+`https://matrix.org/packages/debian/`, you should note that this key has been
+revoked. You should remove the old key with `sudo apt-key remove
+C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
+update your configuration.
+
+The fingerprint of the repository signing key (as shown by `gpg
+/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
+`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

#### Downstream Debian/Ubuntu packages

For `buster` and `sid`, Synapse is available in the Debian repositories and

@@ -173,7 +173,7 @@ Synapse offers two database engines:
* `PostgreSQL <https://www.postgresql.org>`_

By default Synapse uses SQLite and, in doing so, trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.

Almost all installations should opt to use PostgreSQL. Advantages include:

@@ -272,7 +272,7 @@ to install using pip and a virtualenv::

    virtualenv -p python3 env
    source env/bin/activate
-    python -m pip install -e .[all]
+    python -m pip install --no-pep-517 -e .[all]

This will run a process of downloading and installing all the needed
dependencies into a virtual env.
1 changelog.d/4338.feature (Normal file)
@@ -0,0 +1 @@
Synapse now more efficiently collates room statistics.
@@ -1 +0,0 @@
Add test to verify threepid auth check added in #4435.
@@ -1 +0,0 @@
Avoid redundant URL encoding of redirect URL for SSO login in the fallback login page. Fixes a regression introduced in [#4220](https://github.com/matrix-org/synapse/pull/4220). Contributed by Marcel Fabian Krüger ("[zaugin](https://github.com/zauguin)").
@@ -1 +0,0 @@
Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server.
@@ -1 +0,0 @@
Add ability for password provider modules to bind email addresses to users upon registration.
@@ -1 +0,0 @@
Fix/improve some docstrings in the replication code.
@@ -1,2 +0,0 @@
Split synapse.replication.tcp.streams into smaller files.
@@ -1 +0,0 @@
Refactor replication row generation/parsing.
@@ -1 +0,0 @@
Fix sync bug which made accepting invites unreliable in worker-mode synapses.
@@ -1 +0,0 @@
Fix sync bug which made accepting invites unreliable in worker-mode synapses.
@@ -1 +0,0 @@
Run `black` to clean up formatting on `synapse/storage/roommember.py` and `synapse/storage/events.py`.
@@ -1 +0,0 @@
Remove log line for password via the admin API.
@@ -1 +0,0 @@
Fix typo in TLS filenames in docker/README.md. Also add the '-p' commandline option to the 'docker run' example. Contributed by Jurrie Overgoor.
@@ -1,2 +0,0 @@
Refactor room version definitions.
@@ -1 +0,0 @@
Add `config.signing_key_path` that can be read by `synapse.config` utility.
@@ -1 +0,0 @@
start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too.
@@ -1 +0,0 @@
Track which identity server is used when binding a threepid and use that for unbinding, as per MSC1915.
@@ -1 +0,0 @@
Rewrite KeyringTestCase as a HomeserverTestCase.
@@ -1 +0,0 @@
README updates: Corrected the default POSTGRES_USER. Added port forwarding hint in TLS section.
@@ -1 +0,0 @@
Remove presence list support as per MSC 1819.
@@ -1 +0,0 @@
Transfer related groups on room upgrade.
@@ -1 +0,0 @@
Reduce CPU usage starting pushers during start up.
@@ -1 +0,0 @@
Run `black` on the remainder of `synapse/storage/`.
@@ -1 +0,0 @@
Fix grammar in get_current_users_in_room and give it a docstring.
@@ -1 +0,0 @@
Prevent the ability to kick users from a room they aren't in.
@@ -1 +0,0 @@
Add a delete group admin API.
@@ -1 +0,0 @@
Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud <anders@jensenwaud.com>.
@@ -1 +0,0 @@
Refactor synapse.storage._base._simple_select_list_paginate.
@@ -1 +0,0 @@
Add config option to block users from looking up 3PIDs.
1 changelog.d/5200.bugfix (Normal file)
@@ -0,0 +1 @@
Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo.

1 changelog.d/5230.misc (Normal file)
@@ -0,0 +1 @@
Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2.

1 changelog.d/5232.misc (Normal file)
@@ -0,0 +1 @@
Run black on synapse.crypto.keyring.

1 changelog.d/5239.bugfix (Normal file)
@@ -0,0 +1 @@
Fix 500 Internal Server Error when sending an event with `m.relates_to: null`.
File diff suppressed because one or more lines are too long
@@ -12,6 +12,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
+SyslogIdentifier=matrix-synapse-%i

[Install]
WantedBy=matrix-synapse.service

@@ -11,6 +11,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --confi
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
+SyslogIdentifier=matrix-synapse

[Install]
WantedBy=matrix.target

@@ -22,10 +22,10 @@ Group=nogroup

WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+SyslogIdentifier=matrix-synapse

# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0

[Install]
WantedBy=multi-user.target
28 debian/changelog (vendored)
@@ -1,3 +1,31 @@
matrix-synapse-py3 (0.99.5.1) stable; urgency=medium

  * New synapse release 0.99.5.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 22 May 2019 16:22:24 +0000

matrix-synapse-py3 (0.99.4) stable; urgency=medium

  [ Christoph Müller ]
  * Configure the systemd units to have a log identifier of `matrix-synapse`

  [ Synapse Packaging team ]
  * New synapse release 0.99.4.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 15 May 2019 13:58:08 +0100

matrix-synapse-py3 (0.99.3.2) stable; urgency=medium

  * New synapse release 0.99.3.2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 03 May 2019 18:56:20 +0100

matrix-synapse-py3 (0.99.3.1) stable; urgency=medium

  * New synapse release 0.99.3.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 03 May 2019 16:02:43 +0100

matrix-synapse-py3 (0.99.3) stable; urgency=medium

  [ Richard van der Hoff ]
1 debian/matrix-synapse.service (vendored)
@@ -11,6 +11,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --confi
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
+SyslogIdentifier=matrix-synapse

[Install]
WantedBy=multi-user.target
2 debian/test/.gitignore (vendored, Normal file)
@@ -0,0 +1,2 @@
.vagrant
*.log
23 debian/test/provision.sh (vendored, Normal file)
@@ -0,0 +1,23 @@
#!/bin/bash
#
# provisioning script for vagrant boxes for testing the matrix-synapse debs.
#
# Will install the most recent matrix-synapse-py3 deb for this platform from
# the /debs directory.

set -e

apt-get update
apt-get install -y lsb-release

deb=`ls /debs/matrix-synapse-py3_*+$(lsb_release -cs)*.deb | sort | tail -n1`

debconf-set-selections <<EOF
matrix-synapse matrix-synapse/report-stats boolean false
matrix-synapse matrix-synapse/server-name string localhost:18448
EOF

dpkg -i "$deb"

sed -i -e '/port: 8...$/{s/8448/18448/; s/8008/18008/}' -e '$aregistration_shared_secret: secret' /etc/matrix-synapse/homeserver.yaml
systemctl restart matrix-synapse
13 debian/test/stretch/Vagrantfile (vendored, Normal file)
@@ -0,0 +1,13 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

ver = `cd ../../..; dpkg-parsechangelog -S Version`.strip()

Vagrant.configure("2") do |config|
  config.vm.box = "debian/stretch64"

  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.vm.synced_folder "../../../../debs", "/debs", type: "nfs"

  config.vm.provision "shell", path: "../provision.sh"
end
10 debian/test/xenial/Vagrantfile (vendored, Normal file)
@@ -0,0 +1,10 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial64"

  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.vm.synced_folder "../../../../debs", "/debs"
  config.vm.provision "shell", path: "../provision.sh"
end
@@ -50,12 +50,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
        debhelper \
        devscripts \
        dh-systemd \
+       libsystemd-dev \
        lsb-release \
        pkg-config \
        python3-dev \
        python3-pip \
        python3-setuptools \
        python3-venv \
-       sqlite3
+       sqlite3 \
+       libpq-dev

COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
@@ -102,8 +102,9 @@ when ``SYNAPSE_CONFIG_PATH`` is not set.
* ``SYNAPSE_SERVER_NAME`` (mandatory), the server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
-* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
-  you run your own TLS-capable reverse proxy).
+* `SYNAPSE_NO_TLS`, (accepts `true`, `false`, `on`, `off`, `1`, `0`, `yes`, `no`): disable
+  TLS in Synapse (use this if you run your own TLS-capable reverse proxy). Defaults
+  to `false` (ie, TLS is enabled by default).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.

@@ -160,7 +161,7 @@ specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and
example:

```
-docker run -it --rm
+docker run -it --rm \
    --mount type=volume,src=synapse-data,dst=/data \
    -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
@@ -59,6 +59,18 @@ else:
    if not os.path.exists("/compiled"): os.mkdir("/compiled")

    config_path = "/compiled/homeserver.yaml"

+    # Convert SYNAPSE_NO_TLS to boolean if exists
+    if "SYNAPSE_NO_TLS" in environ:
+        tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"])
+        if tlsanswerstring in ("true", "on", "1", "yes"):
+            environ["SYNAPSE_NO_TLS"] = True
+        else:
+            if tlsanswerstring in ("false", "off", "0", "no"):
+                environ["SYNAPSE_NO_TLS"] = False
+            else:
+                print("Environment variable \"SYNAPSE_NO_TLS\" found but value \"" + tlsanswerstring + "\" unrecognized; exiting.")
+                sys.exit(2)
+
    convert("/conf/homeserver.yaml", config_path, environ)
    convert("/conf/log.config", "/compiled/log.config", environ)
@@ -177,7 +177,6 @@ You can do this with a `.well-known` file as follows:
   on `customer.example.net:8000` it correctly handles HTTP requests with
   Host header set to `customer.example.net:8000`.

## FAQ

### Synapse 0.99.0 has just been released, what do I need to do right now?
42 docs/admin_api/account_validity.rst (Normal file)
@@ -0,0 +1,42 @@
Account validity API
====================

This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
``account_validity``) in Synapse's configuration.

Renew account
-------------

This API extends the validity of an account by as much time as configured in the
``period`` parameter from the ``account_validity`` configuration.

The API is::

    POST /_synapse/admin/v1/account_validity/validity

with the following body:

.. code:: json

    {
        "user_id": "<user ID for the account to renew>",
        "expiration_ts": 0,
        "enable_renewal_emails": true
    }

``expiration_ts`` is an optional parameter and overrides the expiration date,
which otherwise defaults to now + validity period.

``enable_renewal_emails`` is also an optional parameter and enables/disables
sending renewal emails to the user. Defaults to true.

The API returns with the new expiration date for this account, as a timestamp in
milliseconds since epoch:

.. code:: json

    {
        "expiration_ts": 0
    }
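For illustration, a minimal Python sketch of calling this renew endpoint with the `requests` library might look as follows; the homeserver URL, admin access token, and user ID are all placeholders:

```python
import requests

# Hypothetical values; substitute your homeserver URL and an admin access token.
HOMESERVER = "https://matrix.example.com"
ADMIN_TOKEN = "<admin access token>"

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/account_validity/validity",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={
        "user_id": "@alice:example.com",   # account to renew (placeholder)
        "enable_renewal_emails": True,     # optional, defaults to true
    },
)
resp.raise_for_status()
print("new expiration:", resp.json()["expiration_ts"])  # ms since epoch
```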
@@ -8,7 +8,7 @@ being deleted.
The API is:

```
-POST /_matrix/client/r0/admin/delete_group/<group_id>
+POST /_synapse/admin/v1/delete_group/<group_id>
```

including an `access_token` of a server admin.
@@ -4,7 +4,7 @@ This API gets a list of known media in a room.

The API is:
```
-GET /_matrix/client/r0/admin/room/<room_id>/media
+GET /_synapse/admin/v1/room/<room_id>/media
```
including an `access_token` of a server admin.
@@ -10,7 +10,7 @@ paginate further back in the room from the point being purged from.

The API is:

-``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``
+``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``

including an ``access_token`` of a server admin.

@@ -49,7 +49,7 @@ Purge status query

It is possible to poll for updates on recent purges with a second API;

-``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
+``GET /_synapse/admin/v1/purge_history_status/<purge_id>``

(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
@@ -6,7 +6,7 @@ media.

The API is::

-    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
+    POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>

    {}
@@ -12,7 +12,7 @@ is not enabled.

To fetch the nonce, you need to request one from the API::

-    > GET /_matrix/client/r0/admin/register
+    > GET /_synapse/admin/v1/register

    < {"nonce": "thisisanonce"}

@@ -22,7 +22,7 @@ body containing the nonce, username, password, whether they are an admin

As an example::

-    > POST /_matrix/client/r0/admin/register
+    > POST /_synapse/admin/v1/register
    > {
        "nonce": "thisisanonce",
        "username": "pepper_roni",
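To make the flow above concrete, here is a hedged Python sketch of shared-secret registration: fetch a nonce, compute the MAC (assuming the HMAC-SHA1 construction Synapse documents for this endpoint, keyed with `registration_shared_secret` over the NUL-separated nonce, username, password, and admin flag), and post the registration. All values are placeholders.

```python
import hashlib
import hmac

import requests

HOMESERVER = "https://matrix.example.com"        # placeholder
SHARED_SECRET = b"<registration_shared_secret>"  # from homeserver.yaml

# Step 1: fetch a nonce.
nonce = requests.get(f"{HOMESERVER}/_synapse/admin/v1/register").json()["nonce"]

# Step 2: MAC the registration parameters with the shared secret.
user, password, admin = "pepper_roni", "pizza", False
mac = hmac.new(SHARED_SECRET, digestmod=hashlib.sha1)
mac.update(nonce.encode("utf8"))
mac.update(b"\x00")
mac.update(user.encode("utf8"))
mac.update(b"\x00")
mac.update(password.encode("utf8"))
mac.update(b"\x00")
mac.update(b"admin" if admin else b"notadmin")

# Step 3: submit the registration.
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/register",
    json={
        "nonce": nonce,
        "username": user,
        "password": password,
        "admin": admin,
        "mac": mac.hexdigest(),
    },
)
print(resp.json())
```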
48 docs/admin_api/server_notices.md (Normal file)
@@ -0,0 +1,48 @@
# Server Notices

The API to send notices is as follows:

```
POST /_synapse/admin/v1/send_server_notice
```

or:

```
PUT /_synapse/admin/v1/send_server_notice/{txnId}
```

You will need to authenticate with an access token for an admin user.

When using the `PUT` form, retransmissions with the same transaction ID will be
ignored in the same way as with `PUT
/_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`.

The request body should look something like the following:

```json
{
    "user_id": "@target_user:server_name",
    "content": {
        "msgtype": "m.text",
        "body": "This is my message"
    }
}
```

You can optionally include the following additional parameters:

* `type`: the type of event. Defaults to `m.room.message`.
* `state_key`: Setting this will result in a state event being sent.

Once the notice has been sent, the API will return the following response:

```json
{
    "event_id": "<event_id>"
}
```

Note that server notices must be enabled in `homeserver.yaml` before this API
can be used. See [server_notices.md](../server_notices.md) for more information.
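As a concrete (illustrative) sketch of the idempotent `PUT` form in Python, with placeholder homeserver URL and token:

```python
import uuid

import requests

HOMESERVER = "https://matrix.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"       # placeholder

txn_id = uuid.uuid4().hex  # transaction ID makes retries idempotent
resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v1/send_server_notice/{txn_id}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={
        "user_id": "@target_user:example.com",
        "content": {"msgtype": "m.text", "body": "This is my message"},
    },
)
resp.raise_for_status()
print(resp.json()["event_id"])
```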
@@ -5,7 +5,7 @@ This API returns information about a specific user account.

The api is::

-    GET /_matrix/client/r0/admin/whois/<user_id>
+    GET /_synapse/admin/v1/whois/<user_id>

including an ``access_token`` of a server admin.

@@ -50,7 +50,7 @@ references to it).

The api is::

-    POST /_matrix/client/r0/admin/deactivate/<user_id>
+    POST /_synapse/admin/v1/deactivate/<user_id>

with a body of:

@@ -73,7 +73,7 @@ Changes the password of another user.

The api is::

-    POST /_matrix/client/r0/admin/reset_password/<user_id>
+    POST /_synapse/admin/v1/reset_password/<user_id>

with a body of:
@@ -8,9 +8,7 @@ contains Synapse version information).

The api is::

-    GET /_matrix/client/r0/admin/server_version
-
-including an ``access_token`` of a server admin.
+    GET /_synapse/admin/v1/server_version

It returns a JSON body like the following:
@@ -48,7 +48,10 @@ How to monitor Synapse metrics using Prometheus

      - job_name: "synapse"
        metrics_path: "/_synapse/metrics"
        static_configs:
-          - targets: ["my.server.here:9092"]
+          - targets: ["my.server.here:port"]
+
+    where ``my.server.here`` is the IP address of Synapse, and ``port`` is the listener port
+    configured with the ``metrics`` resource.

If your prometheus is older than 1.5.2, you will need to replace
``static_configs`` in the above with ``target_groups``.
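For a quick check that the metrics listener is reachable before wiring up Prometheus, a small Python sketch like the following can fetch the endpoint directly; the host and port are placeholders for your `metrics` listener:

```python
import requests

# Placeholder address: substitute the host and the port of the listener
# configured with the `metrics` resource.
resp = requests.get("http://my.server.here:9092/_synapse/metrics")
resp.raise_for_status()

# The body is Prometheus text exposition format; print the first few lines.
for line in resp.text.splitlines()[:5]:
    print(line)
```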
@@ -3,6 +3,28 @@ Using Postgres

Postgres version 9.4 or later is known to work.

+Install postgres client libraries
+=================================
+
+Synapse will require the python postgres client library in order to connect to
+a postgres database.
+
+* If you are using the `matrix.org debian/ubuntu
+  packages <../INSTALL.md#matrixorg-packages>`_,
+  the necessary libraries will already be installed.
+
+* For other pre-built packages, please consult the documentation from the
+  relevant package.
+
+* If you installed synapse `in a virtualenv
+  <../INSTALL.md#installing-from-source>`_, you can install the library with::
+
+      ~/synapse/env/bin/pip install matrix-synapse[postgres]
+
+(substituting the path to your virtualenv for ``~/synapse/env``, if you used a
+different path). You will require the postgres development files. These are in
+the ``libpq-dev`` package on Debian-derived distributions.
+
Set up database
===============

@@ -26,29 +48,6 @@ encoding use, e.g.::
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).

-Set up client in Debian/Ubuntu
-===========================
-
-Postgres support depends on the postgres python connector ``psycopg2``. In the
-virtual env::
-
-    sudo apt-get install libpq-dev
-    pip install psycopg2
-
-Set up client in RHEL/CentOs 7
-==============================
-
-Make sure you have the appropriate version of postgres-devel installed. For a
-postgres 9.4, use the postgres 9.4 packages from
-[here](https://wiki.postgresql.org/wiki/YUM_Installation).
-
-As with Debian/Ubuntu, postgres support depends on the postgres python connector
-``psycopg2``. In the virtual env::
-
-    sudo yum install postgresql-devel libpqxx-devel.x86_64
-    export PATH=/usr/pgsql-9.4/bin/:$PATH
-    pip install psycopg2
-
Tuning Postgres
===============
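As a quick connectivity check once the client library is installed, a short illustrative psycopg2 snippet can confirm that Synapse will be able to reach the database; the credentials below are placeholders matching the example database:

```python
import psycopg2  # provided by the matrix-synapse[postgres] extra

# Placeholder credentials; match the database created above.
conn = psycopg2.connect(
    dbname="synapse",
    user="synapse_user",
    password="<password>",
    host="localhost",
)
with conn.cursor() as cur:
    cur.execute("SHOW SERVER_VERSION;")
    print("connected to postgres", cur.fetchone()[0])
conn.close()
```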
@@ -69,6 +69,7 @@ Let's assume that we expect clients to connect to our server at
        SSLEngine on
        ServerName matrix.example.com;

+       AllowEncodedSlashes NoDecode
        ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
        ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
    </VirtualHost>

@@ -77,6 +78,7 @@ Let's assume that we expect clients to connect to our server at
        SSLEngine on
        ServerName example.com;

+       AllowEncodedSlashes NoDecode
        ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
        ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
    </VirtualHost>
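Context for the `AllowEncodedSlashes NoDecode` additions: Matrix APIs place URL-encoded identifiers in the request path, and Apache would otherwise reject or decode the `%2F` sequences before they reach Synapse. A small illustration of such a path (the room alias is hypothetical):

```python
from urllib.parse import quote

room_alias = "#room/with/slashes:example.com"  # hypothetical alias
path = "/_matrix/client/r0/directory/room/" + quote(room_alias, safe="")
print(path)  # the %2F sequences must reach Synapse undecoded
```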
@@ -69,6 +69,20 @@ pid_file: DATADIR/homeserver.pid
#
#use_presence: false

+# Whether to require authentication to retrieve profile data (avatars,
+# display names) of other users through the client API. Defaults to
+# 'false'. Note that profile data is also available via the federation
+# API, so this setting is of limited value if federation is enabled on
+# the server.
+#
+#require_auth_for_profile_requests: true
+
+# If set to 'true', requires authentication to access the server's
+# public rooms directory through the client API, and forbids any other
+# homeserver to fetch it via federation. Defaults to 'false'.
+#
+#restrict_public_rooms_to_local_users: true
+
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
#gc_thresholds: [700, 10, 10]

@@ -101,6 +115,24 @@ pid_file: DATADIR/homeserver.pid
#  - nyc.example.com
#  - syd.example.com

+# Prevent federation requests from being sent to the following
+# blacklist IP address CIDR ranges. If this option is not specified, or
+# specified with an empty list, no ip range blacklist will be enforced.
+#
+# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+# listed here, since they correspond to unroutable addresses.)
+#
+federation_ip_range_blacklist:
+  - '127.0.0.0/8'
+  - '10.0.0.0/8'
+  - '172.16.0.0/12'
+  - '192.168.0.0/16'
+  - '100.64.0.0/10'
+  - '169.254.0.0/16'
+  - '::1/128'
+  - 'fe80::/64'
+  - 'fc00::/7'
+
# List of ports that Synapse should listen on, their purpose and their
# configuration.
#
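To illustrate what the blacklist above means in practice, the standard-library `ipaddress` module can check whether a resolved address falls inside one of the configured ranges; this is an illustrative sketch, not Synapse's implementation:

```python
import ipaddress

# The CIDR ranges from the sample config above.
BLACKLIST = [ipaddress.ip_network(n) for n in (
    "127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16",
    "100.64.0.0/10", "169.254.0.0/16", "::1/128", "fe80::/64", "fc00::/7",
)]

def is_blacklisted(addr: str) -> bool:
    ip = ipaddress.ip_address(addr)
    return any(ip in net for net in BLACKLIST)

print(is_blacklisted("10.1.2.3"))       # True: private range
print(is_blacklisted("93.184.216.34"))  # False: public address
```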
@@ -136,8 +168,8 @@ pid_file: DATADIR/homeserver.pid
#
# Valid resource names are:
#
-#   client: the client-server API (/_matrix/client). Also implies 'media' and
-#       'static'.
+#   client: the client-server API (/_matrix/client), and the synapse admin
+#       API (/_synapse/admin). Also implies 'media' and 'static'.
#
#   consent: user consent forms (/_matrix/consent). See
#       docs/consent_tracking.md.
@@ -236,6 +268,20 @@ listeners:
#  - medium: 'email'
#    address: 'reserved_user@example.com'

+# Used by phonehome stats to group together related servers.
+#server_context: context
+
+# Whether to require a user to be in the room to add an alias to it.
+# Defaults to 'true'.
+#
+#require_membership_for_aliases: false
+
+# Whether to allow per-room membership profiles through the send of membership
+# events with profile information that differ from the target's global profile.
+# Defaults to 'true'.
+#
+#allow_per_room_profiles: false
+

## TLS ##
@@ -257,6 +303,40 @@ listeners:
#
#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"

+# Whether to verify TLS certificates when sending federation traffic.
+#
+# This currently defaults to `false`, however this will change in
+# Synapse 1.0 when valid federation certificates will be required.
+#
+#federation_verify_certificates: true
+
+# Skip federation certificate verification on the following whitelist
+# of domains.
+#
+# This setting should only be used in very specific cases, such as
+# federation over Tor hidden services and similar. For private networks
+# of homeservers, you likely want to use a private CA instead.
+#
+# Only effective if federation_verify_certificates is `true`.
+#
+#federation_certificate_verification_whitelist:
+#  - lon.example.com
+#  - *.domain.com
+#  - *.onion
+
+# List of custom certificate authorities for federation traffic.
+#
+# This setting should only normally be used within a private network of
+# homeservers.
+#
+# Note that this list will replace those that are provided by your
+# operating environment. Certificates must be in PEM format.
+#
+#federation_custom_ca_list:
+#  - myCA1.pem
+#  - myCA2.pem
+#  - myCA3.pem
+
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
@@ -372,21 +452,15 @@ log_config: "CONFDIR/SERVERNAME.log.config"

## Ratelimiting ##

-# Number of messages a client can send per second
-#
-#rc_messages_per_second: 0.2
-
-# Number of message a client can send before being throttled
-#
-#rc_message_burst_count: 10.0
-
-# Ratelimiting settings for registration and login.
+# Ratelimiting settings for client actions (registration, login, messaging).
#
# Each ratelimiting configuration is made of two parameters:
#   - per_second: number of requests a client can send per second.
#   - burst_count: number of requests a client can send before being throttled.
#
# Synapse currently uses the following configurations:
+#   - one for messages that ratelimits sending based on the account the client
+#     is using
#   - one for registration that ratelimits registration requests based on the
#     client's IP address.
#   - one for login that ratelimits login requests based on the client's IP

@@ -399,6 +473,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
#
# The defaults are as shown below.
#
+#rc_message:
+#  per_second: 0.2
+#  burst_count: 10
+#
#rc_registration:
#  per_second: 0.17
#  burst_count: 3

@@ -414,29 +492,28 @@ log_config: "CONFDIR/SERVERNAME.log.config"
#  per_second: 0.17
#  burst_count: 3

-# The federation window size in milliseconds
-#
-#federation_rc_window_size: 1000
-
-# The number of federation requests from a single server in a window
-# before the server will delay processing the request.
-#
-#federation_rc_sleep_limit: 10
-
-# The duration in milliseconds to delay processing events from
-# remote servers by if they go over the sleep limit.
-#
-#federation_rc_sleep_delay: 500
-
-# The maximum number of concurrent federation requests allowed
-# from a single server
-#
-#federation_rc_reject_limit: 50
-
-# The number of federation requests to concurrently process from a
-# single server
-#
-#federation_rc_concurrent: 3
+# Ratelimiting settings for incoming federation
+#
+# The rc_federation configuration is made up of the following settings:
+#  - window_size: window size in milliseconds
+#  - sleep_limit: number of federation requests from a single server in
+#    a window before the server will delay processing the request.
+#  - sleep_delay: duration in milliseconds to delay processing events
+#    from remote servers by if they go over the sleep limit.
+#  - reject_limit: maximum number of concurrent federation requests
+#    allowed from a single server
+#  - concurrent: number of federation requests to concurrently process
+#    from a single server
+#
+# The defaults are as shown below.
+#
+#rc_federation:
+#  window_size: 1000
+#  sleep_limit: 10
+#  sleep_delay: 500
+#  reject_limit: 50
+#  concurrent: 3

# Target outgoing federation transaction frequency for sending read-receipts,
# per-room.
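The `per_second`/`burst_count` pairs above describe token-bucket limiters: a client may send `burst_count` requests at once, after which requests are admitted at `per_second`. An illustrative sketch (not Synapse's implementation):

```python
import time

class Ratelimiter:
    """Illustrative token bucket: allows `burst_count` requests at once,
    refilling at `per_second` tokens per second."""

    def __init__(self, per_second: float, burst_count: float):
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill tokens for the elapsed time, capped at the burst size.
        elapsed = now - self.last
        self.tokens = min(self.burst_count, self.tokens + elapsed * self.per_second)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False

# With the default rc_message settings, a client can send 10 messages
# immediately, then is throttled to one message every 5 seconds.
limiter = Ratelimiter(per_second=0.2, burst_count=10)
print(sum(limiter.allow() for _ in range(12)))  # 10 allowed, 2 rejected
```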
@@ -506,11 +583,12 @@ uploads_path: "DATADIR/uploads"
#    height: 600
#    method: scale

-# Is the preview URL API enabled? If enabled, you *must* specify
-# an explicit url_preview_ip_range_blacklist of IPs that the spider is
-# denied from accessing.
+# Is the preview URL API enabled?
#
-#url_preview_enabled: false
+# 'false' by default: uncomment the following to enable it (and specify a
+# url_preview_ip_range_blacklist blacklist).
+#
+#url_preview_enabled: true

# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly

@@ -520,6 +598,12 @@ uploads_path: "DATADIR/uploads"
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
+# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+# listed here, since they correspond to unroutable addresses.)
+#
+# This must be specified if url_preview_enabled is set. It is recommended that
+# you uncomment the following list as a starting point.
+#
#url_preview_ip_range_blacklist:
#  - '127.0.0.0/8'
#  - '10.0.0.0/8'

@@ -530,7 +614,7 @@ uploads_path: "DATADIR/uploads"
#  - '::1/128'
#  - 'fe80::/64'
#  - 'fc00::/7'
#

# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.
# This is useful for specifying exceptions to wide-ranging blacklisted
@@ -643,6 +727,40 @@ uploads_path: "DATADIR/uploads"
#
#enable_registration: false

+# Optional account validity configuration. This allows for accounts to be denied
+# any request after a given period.
+#
+# ``enabled`` defines whether the account validity feature is enabled. Defaults
+# to False.
+#
+# ``period`` allows setting the period after which an account is valid
+# after its registration. When renewing the account, its validity period
+# will be extended by this amount of time. This parameter is required when using
+# the account validity feature.
+#
+# ``renew_at`` is the amount of time before an account's expiry date at which
+# Synapse will send an email to the account's email address with a renewal link.
+# This needs the ``email`` and ``public_baseurl`` configuration sections to be
+# filled.
+#
+# ``renew_email_subject`` is the subject of the email sent out with the renewal
+# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter
+# from the ``email`` section.
+#
+# Once this feature is enabled, Synapse will look for registered users without an
+# expiration date at startup and will add one to every account it found using the
+# current settings at that time.
+# This means that, if a validity period is set, and Synapse is restarted (it will
+# then derive an expiration date from the current validity period), and some time
+# after that the validity period changes and Synapse is restarted, the users'
+# expiration dates won't be updated unless their account is manually renewed.
+#
+#account_validity:
+#  enabled: True
+#  period: 6w
+#  renew_at: 1w
+#  renew_email_subject: "Renew your %(app)s account"
+
# The user must provide all of the below types of 3PID when registering.
#
#registrations_require_3pid:
@@ -888,7 +1006,7 @@ password_config:


-# Enable sending emails for notification events
+# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.

@@ -910,6 +1028,9 @@ password_config:
#   #template_dir: res/templates
#   notif_template_html: notif_mail.html
#   notif_template_text: notif_mail.txt
+#   # Templates for account expiry notices.
+#   expiry_template_html: notice_expiry.html
+#   expiry_template_text: notice_expiry.txt
#   notif_for_new_users: True
#   riot_base_url: "http://localhost/riot"
@@ -1032,6 +1153,22 @@ password_config:
#


# Local statistics collection. Used in populating the room directory.
#
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#stats:
#   enabled: true
#   bucket_size: 1d
#   retention: 1y


# Server Notices room configuration
#
# Uncomment this section to enable a room which can be used to send notices
@@ -1,5 +1,4 @@
Server Notices
==============
# Server Notices

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
@@ -11,8 +10,7 @@ they may also find a use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.

User experience
---------------
## User experience

When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
@@ -29,8 +27,7 @@ levels.
Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.

Synapse configuration
---------------------
## Synapse configuration

Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
@@ -58,17 +55,7 @@ room which will be created.
`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.

Sending notices
---------------
## Sending notices

As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).

In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:

```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```
To send server notices to users you can use the
[admin_api](admin_api/server_notices.md).
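For illustration, sending the same notice via that admin API might look like the sketch below. The endpoint path and request body shape are assumptions taken from admin_api/server_notices.md, the access token must belong to a server admin, and `requests` is used here purely for demonstration:

```python
# Sketch: send a server notice via the admin API (not part of this diff).
# Assumptions: endpoint path per admin_api/server_notices.md; ACCESS_TOKEN
# belongs to a server admin; the `requests` library is installed.
import requests

HOMESERVER = "https://synapse.example.com"  # hypothetical base URL
ACCESS_TOKEN = "MDAx..."  # placeholder admin access token

resp = requests.post(
    HOMESERVER + "/_synapse/admin/v1/send_server_notice",
    headers={"Authorization": "Bearer " + ACCESS_TOKEN},
    json={
        "user_id": "@user:server.com",
        "content": {"msgtype": "m.text", "body": "foo"},
    },
)
resp.raise_for_status()
print(resp.json())  # on success, the event id of the notice
```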
@@ -227,6 +227,12 @@ following regular expressions::
    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
    ^/_matrix/client/versions$
    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$

Additionally, the following REST endpoints can be handled for GET requests::

    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::
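To sanity-check a reverse-proxy routing rule against the regexes quoted above, a small self-contained sketch (the pattern list is not exhaustive; it only repeats the entries added in this hunk):

```python
import re

# The client_reader patterns added in the hunk above (sketch, not exhaustive).
CLIENT_READER_PATTERNS = [
    r"^/_matrix/client/(api/v1|r0|unstable)/account/3pid$",
    r"^/_matrix/client/(api/v1|r0|unstable)/keys/query$",
    r"^/_matrix/client/(api/v1|r0|unstable)/keys/changes$",
    r"^/_matrix/client/versions$",
    r"^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$",
]

def handled_by_client_reader(path):
    """Return True if a reverse proxy should route `path` to the worker."""
    return any(re.match(p, path) for p in CLIENT_READER_PATTERNS)

assert handled_by_client_reader("/_matrix/client/r0/keys/query")
assert not handled_by_client_reader("/_matrix/client/r0/sync")
```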
@@ -24,6 +24,7 @@ DISTS = (
    "ubuntu:xenial",
    "ubuntu:bionic",
    "ubuntu:cosmic",
    "ubuntu:disco",
)

DESC = '''\
@@ -58,15 +58,11 @@ BOOLEAN_COLUMNS = {


APPEND_ONLY_TABLES = [
    "event_content_hashes",
    "event_reference_hashes",
    "event_signatures",
    "event_edge_hashes",
    "events",
    "event_json",
    "state_events",
    "room_memberships",
    "feedback",
    "topics",
    "room_names",
    "rooms",
@@ -88,7 +84,6 @@ APPEND_ONLY_TABLES = [
    "event_search",
    "presence_stream",
    "push_rules_stream",
    "current_state_resets",
    "ex_outlier_stream",
    "cache_invalidation_stream",
    "public_room_list_stream",
setup.py
@@ -86,13 +86,9 @@ long_description = read_file(("README.rst",))

REQUIREMENTS = dependencies['REQUIREMENTS']
CONDITIONAL_REQUIREMENTS = dependencies['CONDITIONAL_REQUIREMENTS']
ALL_OPTIONAL_REQUIREMENTS = dependencies['ALL_OPTIONAL_REQUIREMENTS']

# Make `pip install matrix-synapse[all]` install all the optional dependencies.
ALL_OPTIONAL_REQUIREMENTS = set()

for optional_deps in CONDITIONAL_REQUIREMENTS.values():
    ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS

CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.3"
__version__ = "0.99.5.1"
@@ -64,6 +64,8 @@ class Auth(object):
        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._account_validity = hs.config.account_validity

    @defer.inlineCallbacks
    def check_from_context(self, room_version, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids(self.store)
@@ -226,6 +228,17 @@ class Auth(object):
            token_id = user_info["token_id"]
            is_guest = user_info["is_guest"]

            # Deny the request if the user account has expired.
            if self._account_validity.enabled:
                user_id = user.to_string()
                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
                if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
                    raise AuthError(
                        403,
                        "User account has expired",
                        errcode=Codes.EXPIRED_ACCOUNT,
                    )

            # device_id may not be present if get_user_by_access_token has been
            # stubbed out.
            device_id = user_info.get("device_id")
@@ -543,7 +556,7 @@ class Auth(object):
        """ Check if the given user is a local server admin.

        Args:
            user (str): mxid of user to check
            user (UserID): user to check

        Returns:
            bool: True if the user is an admin
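For reference, the client-visible effect of that expiry check is a 403 error whose body carries the new errcode. A sketch of the response body (the exact JSON serialization is an assumption; it is handled by SynapseError's error-response machinery):

```python
# Sketch of the error body a client with an expired account would receive,
# based on the AuthError raised above and the EXPIRED_ACCOUNT code added to
# synapse/api/errors.py later in this diff. Serialization is assumed.
expired_account_response = {
    "errcode": "ORG_MATRIX_EXPIRED_ACCOUNT",  # Codes.EXPIRED_ACCOUNT
    "error": "User account has expired",
}
# Sent with HTTP status 403.
```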
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +20,12 @@
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1

# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255

# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255


class Membership(object):

@@ -73,6 +79,7 @@ class EventTypes(object):

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    Encryption = "m.room.encryption"
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"
@@ -113,3 +120,11 @@ class UserTypes(object):
    """
    SUPPORT = "support"
    ALL_USER_TYPES = (SUPPORT,)


class RelationTypes(object):
    """The types of relations known to this server.
    """
    ANNOTATION = "m.annotation"
    REPLACE = "m.replace"
    REFERENCE = "m.reference"
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -60,6 +60,7 @@ class Codes(object):
    UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
    WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
    EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"


class CodeMessageException(RuntimeError):
@@ -327,9 +328,23 @@ class RoomKeysVersionError(SynapseError):
        self.current_version = current_version


class IncompatibleRoomVersionError(SynapseError):
    """A server is trying to join a room whose version it does not support."""
class UnsupportedRoomVersionError(SynapseError):
    """The client's request to create a room used a room version that the server does
    not support."""
    def __init__(self):
        super(UnsupportedRoomVersionError, self).__init__(
            code=400,
            msg="Homeserver does not support this room version",
            errcode=Codes.UNSUPPORTED_ROOM_VERSION,
        )


class IncompatibleRoomVersionError(SynapseError):
    """A server is trying to join a room whose version it does not support.

    Unlike UnsupportedRoomVersionError, it is specific to the case of the make_join
    failing.
    """
    def __init__(self, room_version):
        super(IncompatibleRoomVersionError, self).__init__(
            code=400,
@@ -19,13 +19,15 @@ class EventFormatVersions(object):
    """This is an internal enum for tracking the version of the event format,
    independently from the room version.
    """
    V1 = 1  # $id:server format
    V2 = 2  # MSC1659-style $hash format: introduced for room v3
    V1 = 1  # $id:server event id format
    V2 = 2  # MSC1659-style $hash event id format: introduced for room v3
    V3 = 3  # MSC1884-style $hash format: introduced for room v4


KNOWN_EVENT_FORMAT_VERSIONS = {
    EventFormatVersions.V1,
    EventFormatVersions.V2,
    EventFormatVersions.V3,
}


@@ -75,6 +77,12 @@ class RoomVersions(object):
        EventFormatVersions.V2,
        StateResolutionVersions.V2,
    )
    V4 = RoomVersion(
        "4",
        RoomDisposition.STABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
    )


# the version we will give rooms which are created on this server
@@ -87,5 +95,6 @@ KNOWN_ROOM_VERSIONS = {
        RoomVersions.V2,
        RoomVersions.V3,
        RoomVersions.STATE_V2_TEST,
        RoomVersions.V4,
    )
}  # type: dict[str, RoomVersion]
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,7 @@ from six.moves.urllib.parse import urlencode

from synapse.config import ConfigError

CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
CLIENT_API_PREFIX = "/_matrix/client"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
@@ -22,13 +22,14 @@ import traceback
import psutil
from daemonize import Daemonize

from twisted.internet import error, reactor
from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

@@ -99,6 +100,8 @@ def start_reactor(
        logger (logging.Logger): logger instance to pass to Daemonize
    """

    install_dns_limiter(reactor)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
@@ -312,3 +315,81 @@ def setup_sentry(hs):
        name = hs.config.worker_name if hs.config.worker_name else "master"
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)


def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
    """Replaces the resolver with one that limits the number of in-flight DNS
    requests.

    This is to work around https://twistedmatrix.com/trac/ticket/9620, where we
    can run out of file descriptors and infinite-loop if we attempt to do too
    many DNS queries at once.
    """
    new_resolver = _LimitedHostnameResolver(
        reactor.nameResolver, max_dns_requests_in_flight,
    )

    reactor.installNameResolver(new_resolver)


class _LimitedHostnameResolver(object):
    """Wraps an IHostnameResolver, limiting the number of in-flight DNS lookups.
    """

    def __init__(self, resolver, max_dns_requests_in_flight):
        self._resolver = resolver
        self._limiter = Linearizer(
            name="dns_client_limiter", max_count=max_dns_requests_in_flight,
        )

    def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
                        addressTypes=None, transportSemantics='TCP'):
        # Note this is happening deep within the reactor, so we don't need to
        # worry about log contexts.

        # We need this function to return `resolutionReceiver` so we do all the
        # actual logic involving deferreds in a separate function.
        self._resolve(
            resolutionReceiver, hostName, portNumber,
            addressTypes, transportSemantics,
        )

        return resolutionReceiver

    @defer.inlineCallbacks
    def _resolve(self, resolutionReceiver, hostName, portNumber=0,
                 addressTypes=None, transportSemantics='TCP'):

        with (yield self._limiter.queue(())):
            # resolveHostName doesn't return a Deferred, so we need to hook into
            # the receiver interface to get told when resolution has finished.

            deferred = defer.Deferred()
            receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)

            self._resolver.resolveHostName(
                receiver, hostName, portNumber,
                addressTypes, transportSemantics,
            )

            yield deferred


class _DeferredResolutionReceiver(object):
    """Wraps an IResolutionReceiver and simply resolves the given deferred when
    resolution is complete
    """

    def __init__(self, receiver, deferred):
        self._receiver = receiver
        self._deferred = deferred

    def resolutionBegan(self, resolutionInProgress):
        self._receiver.resolutionBegan(resolutionInProgress)

    def addressResolved(self, address):
        self._receiver.addressResolved(address)

    def resolutionComplete(self):
        self._deferred.callback(())
        self._receiver.resolutionComplete()
@@ -38,6 +38,7 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
@@ -45,6 +46,7 @@ from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,
@@ -52,9 +54,11 @@ from synapse.rest.client.v1.room import (
    RoomMemberListRestServlet,
    RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -78,6 +82,7 @@ class ClientReaderSlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    BaseSlavedStore,
):
@@ -109,12 +114,12 @@ class ClientReaderServer(HomeServer):
            ThreepidRestServlet(self).register(resource)
            KeyQueryServlet(self).register(resource)
            KeyChangesServlet(self).register(resource)
            VoipRestServlet(self).register(resource)
            PushRuleRestServlet(self).register(resource)
            VersionsRestServlet().register(resource)

            resources.update({
                "/_matrix/client/r0": resource,
                "/_matrix/client/unstable": resource,
                "/_matrix/client/v2_alpha": resource,
                "/_matrix/client/api/v1": resource,
                "/_matrix/client": resource,
            })

        root_resource = create_resource_tree(resources, NoResource())
@@ -62,6 +62,7 @@ from synapse.python_dependencies import check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.well_known import WellKnownResource
@@ -180,6 +181,7 @@ class SynapseHomeServer(HomeServer):
                "/_matrix/client/v2_alpha": client_resource,
                "/_matrix/client/versions": client_resource,
                "/.well-known/matrix/client": WellKnownResource(self),
                "/_synapse/admin": AdminRestResource(self),
            })

        if self.get_config().saml2_enabled:
@@ -518,6 +520,7 @@ def run(hs):
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["server_context"] = hs.config.server_context
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        version = sys.version_info
@@ -558,7 +561,6 @@ def run(hs):

        stats["database_engine"] = hs.get_datastore().database_engine_name
        stats["database_server_version"] = hs.get_datastore().get_server_version()

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
@@ -71,6 +71,12 @@ class EmailConfig(Config):
            self.email_notif_from = email_config["notif_from"]
            self.email_notif_template_html = email_config["notif_template_html"]
            self.email_notif_template_text = email_config["notif_template_text"]
            self.email_expiry_template_html = email_config.get(
                "expiry_template_html", "notice_expiry.html",
            )
            self.email_expiry_template_text = email_config.get(
                "expiry_template_text", "notice_expiry.txt",
            )

            template_dir = email_config.get("template_dir")
            # we need an absolute path, because we change directory after starting (and
@@ -120,7 +126,7 @@ class EmailConfig(Config):

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable sending emails for notification events
        # Enable sending emails for notification events or expiry notices
        # Defining a custom URL for Riot is only needed if email notifications
        # should contain links to a self-hosted installation of Riot; when set
        # the "app_name" setting is ignored.
@@ -142,6 +148,9 @@ class EmailConfig(Config):
        #   #template_dir: res/templates
        #   notif_template_html: notif_mail.html
        #   notif_template_text: notif_mail.txt
        #   # Templates for account expiry notices.
        #   expiry_template_html: notice_expiry.html
        #   expiry_template_text: notice_expiry.txt
        #   notif_for_new_users: True
        #   riot_base_url: "http://localhost/riot"
        """
@@ -13,6 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .api import ApiConfig
from .appservice import AppServiceConfig
from .captcha import CaptchaConfig
@@ -36,20 +37,41 @@ from .saml2_config import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
from .workers import WorkerConfig


class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
                       ConsentConfig,
                       ServerNoticesConfig, RoomDirectoryConfig,
                       ):
class HomeServerConfig(
    ServerConfig,
    TlsConfig,
    DatabaseConfig,
    LoggingConfig,
    RatelimitConfig,
    ContentRepositoryConfig,
    CaptchaConfig,
    VoipConfig,
    RegistrationConfig,
    MetricsConfig,
    ApiConfig,
    AppServiceConfig,
    KeyConfig,
    SAML2Config,
    CasConfig,
    JWTConfig,
    PasswordConfig,
    EmailConfig,
    WorkerConfig,
    PasswordAuthProviderConfig,
    PushConfig,
    SpamCheckerConfig,
    GroupsConfig,
    UserDirectoryConfig,
    ConsentConfig,
    StatsConfig,
    ServerNoticesConfig,
    RoomDirectoryConfig,
):
    pass
@@ -16,16 +16,56 @@ from ._base import Config


class RateLimitConfig(object):
    def __init__(self, config):
        self.per_second = config.get("per_second", 0.17)
        self.burst_count = config.get("burst_count", 3.0)
    def __init__(self, config, defaults={"per_second": 0.17, "burst_count": 3.0}):
        self.per_second = config.get("per_second", defaults["per_second"])
        self.burst_count = config.get("burst_count", defaults["burst_count"])


class FederationRateLimitConfig(object):
    _items_and_default = {
        "window_size": 10000,
        "sleep_limit": 10,
        "sleep_delay": 500,
        "reject_limit": 50,
        "concurrent": 3,
    }

    def __init__(self, **kwargs):
        for i in self._items_and_default.keys():
            setattr(self, i, kwargs.get(i) or self._items_and_default[i])


class RatelimitConfig(Config):

    def read_config(self, config):
        self.rc_messages_per_second = config.get("rc_messages_per_second", 0.2)
        self.rc_message_burst_count = config.get("rc_message_burst_count", 10.0)

        # Load the new-style messages config if it exists. Otherwise fall back
        # to the old method.
        if "rc_message" in config:
            self.rc_message = RateLimitConfig(
                config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
            )
        else:
            self.rc_message = RateLimitConfig(
                {
                    "per_second": config.get("rc_messages_per_second", 0.2),
                    "burst_count": config.get("rc_message_burst_count", 10.0),
                }
            )

        # Load the new-style federation config, if it exists. Otherwise, fall
        # back to the old method.
        if "rc_federation" in config:
            self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
        else:
            self.rc_federation = FederationRateLimitConfig(
                **{
                    "window_size": config.get("federation_rc_window_size"),
                    "sleep_limit": config.get("federation_rc_sleep_limit"),
                    "sleep_delay": config.get("federation_rc_sleep_delay"),
                    "reject_limit": config.get("federation_rc_reject_limit"),
                    "concurrent": config.get("federation_rc_concurrent"),
                }
            )

        self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
@@ -33,38 +73,26 @@ class RatelimitConfig(Config):
        self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
        self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
        self.rc_login_failed_attempts = RateLimitConfig(
            rc_login_config.get("failed_attempts", {}),
            rc_login_config.get("failed_attempts", {})
        )

        self.federation_rc_window_size = config.get("federation_rc_window_size", 1000)
        self.federation_rc_sleep_limit = config.get("federation_rc_sleep_limit", 10)
        self.federation_rc_sleep_delay = config.get("federation_rc_sleep_delay", 500)
        self.federation_rc_reject_limit = config.get("federation_rc_reject_limit", 50)
        self.federation_rc_concurrent = config.get("federation_rc_concurrent", 3)

        self.federation_rr_transactions_per_room_per_second = config.get(
            "federation_rr_transactions_per_room_per_second", 50,
            "federation_rr_transactions_per_room_per_second", 50
        )

    def default_config(self, **kwargs):
        return """\
        ## Ratelimiting ##

        # Number of messages a client can send per second
        #
        #rc_messages_per_second: 0.2

        # Number of messages a client can send before being throttled
        #
        #rc_message_burst_count: 10.0

        # Ratelimiting settings for registration and login.
        # Ratelimiting settings for client actions (registration, login, messaging).
        #
        # Each ratelimiting configuration is made of two parameters:
        #   - per_second: number of requests a client can send per second.
        #   - burst_count: number of requests a client can send before being throttled.
        #
        # Synapse currently uses the following configurations:
        #   - one for messages that ratelimits sending based on the account the client
        #     is using
        #   - one for registration that ratelimits registration requests based on the
        #     client's IP address.
        #   - one for login that ratelimits login requests based on the client's IP
@@ -77,6 +105,10 @@ class RatelimitConfig(Config):
        #
        # The defaults are as shown below.
        #
        #rc_message:
        #  per_second: 0.2
        #  burst_count: 10
        #
        #rc_registration:
        #  per_second: 0.17
        #  burst_count: 3
@@ -92,29 +124,28 @@ class RatelimitConfig(Config):
        #    per_second: 0.17
        #    burst_count: 3

        # The federation window size in milliseconds
        #
        #federation_rc_window_size: 1000

        # The number of federation requests from a single server in a window
        # before the server will delay processing the request.
        # Ratelimiting settings for incoming federation
        #
        #federation_rc_sleep_limit: 10

        # The duration in milliseconds to delay processing events from
        # remote servers by if they go over the sleep limit.
        # The rc_federation configuration is made up of the following settings:
        #   - window_size: window size in milliseconds
        #   - sleep_limit: number of federation requests from a single server in
        #     a window before the server will delay processing the request.
        #   - sleep_delay: duration in milliseconds to delay processing events
        #     from remote servers by if they go over the sleep limit.
        #   - reject_limit: maximum number of concurrent federation requests
        #     allowed from a single server
        #   - concurrent: number of federation requests to concurrently process
        #     from a single server
        #
        #federation_rc_sleep_delay: 500

        # The maximum number of concurrent federation requests allowed
        # from a single server
        # The defaults are as shown below.
        #
        #federation_rc_reject_limit: 50

        # The number of federation requests to concurrently process from a
        # single server
        #
        #federation_rc_concurrent: 3
        #rc_federation:
        #  window_size: 1000
        #  sleep_limit: 10
        #  sleep_delay: 500
        #  reject_limit: 50
        #  concurrent: 3

        # Target outgoing federation transaction frequency for sending read-receipts,
        # per-room.
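For intuition about `per_second` and `burst_count`: they behave like the refill rate and capacity of a token bucket. A minimal sketch (illustrative only, not Synapse's actual `Ratelimiter` implementation):

```python
import time

class TokenBucket(object):
    """Rough sketch of how per_second/burst_count interact. Synapse's real
    ratelimiter lives in synapse/api/ratelimiting.py and differs in detail."""

    def __init__(self, per_second, burst_count):
        self.per_second = per_second    # refill rate
        self.burst_count = burst_count  # bucket capacity
        self.tokens = burst_count
        self.last = time.time()

    def allow(self):
        now = time.time()
        # Refill tokens accrued since the last request, capped at burst_count.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.per_second
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False

# With per_second=0.2, burst_count=10: ten requests pass immediately, after
# which roughly one request is admitted every five seconds.
bucket = TokenBucket(0.2, 10)
print([bucket.allow() for _ in range(12)])  # first ten True, then False
```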
@@ -20,6 +20,29 @@ from synapse.types import RoomAlias
from synapse.util.stringutils import random_string_with_symbols


class AccountValidityConfig(Config):
    def __init__(self, config, synapse_config):
        self.enabled = config.get("enabled", False)
        self.renew_by_email_enabled = ("renew_at" in config)

        if self.enabled:
            if "period" in config:
                self.period = self.parse_duration(config["period"])
            else:
                raise ConfigError("'period' is required when using account validity")

            if "renew_at" in config:
                self.renew_at = self.parse_duration(config["renew_at"])

            if "renew_email_subject" in config:
                self.renew_email_subject = config["renew_email_subject"]
            else:
                self.renew_email_subject = "Renew your %(app)s account"

        if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
            raise ConfigError("Can't send renewal emails without 'public_baseurl'")


class RegistrationConfig(Config):

    def read_config(self, config):
@@ -31,6 +54,10 @@ class RegistrationConfig(Config):
            strtobool(str(config["disable_registration"]))
        )

        self.account_validity = AccountValidityConfig(
            config.get("account_validity", {}), config,
        )

        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
        self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
@@ -76,6 +103,40 @@ class RegistrationConfig(Config):
        #
        #enable_registration: false

        # Optional account validity configuration. This allows for accounts to be denied
        # any request after a given period.
        #
        # ``enabled`` defines whether the account validity feature is enabled. Defaults
        # to False.
        #
        # ``period`` allows setting the period after which an account is valid
        # after its registration. When renewing the account, its validity period
        # will be extended by this amount of time. This parameter is required when using
        # the account validity feature.
        #
        # ``renew_at`` is the amount of time before an account's expiry date at which
        # Synapse will send an email to the account's email address with a renewal link.
        # This needs the ``email`` and ``public_baseurl`` configuration sections to be
        # filled.
        #
        # ``renew_email_subject`` is the subject of the email sent out with the renewal
        # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
        # from the ``email`` section.
        #
        # Once this feature is enabled, Synapse will look for registered users without an
        # expiration date at startup and will add one to every account it finds, using the
        # current settings at that time.
        # This means that, if a validity period is set, and Synapse is restarted (it will
        # then derive an expiration date from the current validity period), and some time
        # after that the validity period changes and Synapse is restarted, the users'
        # expiration dates won't be updated unless their account is manually renewed.
        #
        #account_validity:
        #  enabled: True
        #  period: 6w
        #  renew_at: 1w
        #  renew_email_subject: "Renew your %%(app)s account"

        # The user must provide all of the below types of 3PID when registering.
        #
        #registrations_require_3pid:
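A quick illustration of the validation rules encoded in `AccountValidityConfig` above (a sketch; it assumes the class can be instantiated standalone, and that `ConfigError` and `parse_duration` behave as in `synapse.config._base`, where durations like "6w" parse to milliseconds):

```python
# Illustrative only: exercising AccountValidityConfig's validation rules.
from synapse.config._base import ConfigError
from synapse.config.registration import AccountValidityConfig

# 'period' is mandatory once the feature is enabled...
try:
    AccountValidityConfig({"enabled": True}, {})
except ConfigError as e:
    print(e)  # 'period' is required when using account validity

# ...and configuring renewal emails requires a public_baseurl.
try:
    AccountValidityConfig(
        {"enabled": True, "period": "6w", "renew_at": "1w"},
        {},  # no public_baseurl in the wider config
    )
except ConfigError as e:
    print(e)  # Can't send renewal emails without 'public_baseurl'
```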
@@ -186,17 +186,21 @@ class ContentRepositoryConfig(Config):
            except ImportError:
                raise ConfigError(MISSING_NETADDR)

            if "url_preview_ip_range_blacklist" in config:
                self.url_preview_ip_range_blacklist = IPSet(
                    config["url_preview_ip_range_blacklist"]
                )
            else:
            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work"
                )

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"]
            )

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::'])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ())
            )
@@ -260,11 +264,12 @@ class ContentRepositoryConfig(Config):
        #thumbnail_sizes:
%(formatted_thumbnail_sizes)s

        # Is the preview URL API enabled? If enabled, you *must* specify
        # an explicit url_preview_ip_range_blacklist of IPs that the spider is
        # denied from accessing.
        # Is the preview URL API enabled?
        #
        #url_preview_enabled: false
        # 'false' by default: uncomment the following to enable it (and specify a
        # url_preview_ip_range_blacklist blacklist).
        #
        #url_preview_enabled: true

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing. There are no defaults: you must explicitly
@@ -274,6 +279,12 @@ class ContentRepositoryConfig(Config):
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
        #url_preview_ip_range_blacklist:
        #  - '127.0.0.0/8'
        #  - '10.0.0.0/8'
@@ -284,7 +295,7 @@ class ContentRepositoryConfig(Config):
        #  - '::1/128'
        #  - 'fe80::/64'
        #  - 'fc00::/7'
        #

        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
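As a quick check of what the resulting `IPSet` does (a sketch using `netaddr`, which the code above imports):

```python
from netaddr import IPSet

# Mirrors what the config code above builds, on a starting-point blacklist.
blacklist = IPSet(['127.0.0.0/8', '10.0.0.0/8'])
blacklist.update(['0.0.0.0', '::'])  # always blacklisted, as noted above

print('127.0.0.1' in blacklist)      # True: loopback is denied to the spider
print('93.184.216.34' in blacklist)  # False: public addresses are allowed
```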
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,6 +18,8 @@
import logging
import os.path

from netaddr import IPSet

from synapse.http.endpoint import parse_and_validate_server_name
from synapse.python_dependencies import DependencyException, check_requirements

@@ -37,6 +40,7 @@ class ServerConfig(Config):

    def read_config(self, config):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
@@ -71,6 +75,19 @@ class ServerConfig(Config):
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False,
        )

        # If set to 'True', requires authentication to access the server's
        # public rooms directory through the client API, and forbids any other
        # homeserver to fetch it via federation.
        self.restrict_public_rooms_to_local_users = config.get(
            "restrict_public_rooms_to_local_users", False,
        )

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
@@ -84,6 +101,11 @@ class ServerConfig(Config):
            "block_non_admin_invites", False,
        )

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", False,
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
@@ -113,14 +135,34 @@ class ServerConfig(Config):
        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get(
            "federation_domain_whitelist", None
            "federation_domain_whitelist", None,
        )
        # turn the whitelist into a hash for speed of lookup

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", [],
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist
            )

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in "
                "federation_ip_range_blacklist: %s" % e
            )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
                self.public_baseurl += '/'
@@ -131,6 +173,16 @@ class ServerConfig(Config):
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True,
        )

        # Whether to allow per-room membership profiles, by sending membership
        # events whose profile information differs from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        self.listeners = []
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
@@ -318,6 +370,20 @@ class ServerConfig(Config):
        #
        #use_presence: false

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

        # If set to 'true', requires authentication to access the server's
        # public rooms directory through the client API, and forbids any other
        # homeserver to fetch it via federation. Defaults to 'false'.
        #
        #restrict_public_rooms_to_local_users: true

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        #
        #gc_thresholds: [700, 10, 10]
@@ -350,6 +416,24 @@ class ServerConfig(Config):
        #  - nyc.example.com
        #  - syd.example.com

        # Prevent federation requests from being sent to the following
        # blacklist IP address CIDR ranges. If this option is not specified, or
        # specified with an empty list, no ip range blacklist will be enforced.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        federation_ip_range_blacklist:
          - '127.0.0.0/8'
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - '::1/128'
          - 'fe80::/64'
          - 'fc00::/7'

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
@@ -385,8 +469,8 @@ class ServerConfig(Config):
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client). Also implies 'media' and
        #       'static'.
        #   client: the client-server API (/_matrix/client), and the synapse admin
        #       API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
@@ -484,6 +568,20 @@ class ServerConfig(Config):
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: 'reserved_user@example.com'

        # Used by phonehome stats to group together related servers.
        #server_context: context

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to 'true'.
        #
        #require_membership_for_aliases: false

        # Whether to allow per-room membership profiles, by sending membership
        # events whose profile information differs from the target's global profile.
        # Defaults to 'true'.
        #
        #allow_per_room_profiles: false
        """ % locals()

    def read_arguments(self, args):
synapse/config/stats.py (new file)
@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division

import sys

from ._base import Config


class StatsConfig(Config):
    """Stats Configuration
    Configuration for the behaviour of synapse's stats engine
    """

    def read_config(self, config):
        self.stats_enabled = True
        self.stats_bucket_size = 86400
        self.stats_retention = sys.maxsize
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
            self.stats_bucket_size = (
                self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
            )
            self.stats_retention = (
                self.parse_duration(
                    stats_config.get("retention", "%ds" % (sys.maxsize,))
                )
                / 1000
            )

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Local statistics collection. Used in populating the room directory.
        #
        # 'bucket_size' controls how large each statistics timeslice is. It can
        # be defined in a human readable short form -- e.g. "1d", "1y".
        #
        # 'retention' controls how long historical statistics will be kept for.
        # It can be defined in a human readable short form -- e.g. "1d", "1y".
        #
        #stats:
        #   enabled: true
        #   bucket_size: 1d
        #   retention: 1y
        """
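The `/ 1000` in `read_config` implies that `parse_duration` returns milliseconds, so the defaults line up as follows (a sketch; the millisecond return value is an assumption inferred from that division):

```python
# Sketch of the unit conversion performed in read_config above.
# Assumption: Config.parse_duration("1d") returns 86_400_000 (milliseconds).
bucket_size_ms = 86400000  # parse_duration("1d")
bucket_size_seconds = bucket_size_ms / 1000
assert bucket_size_seconds == 86400  # matches the stats_bucket_size default
```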
@@ -24,8 +24,10 @@ import six
from unpaddedbase64 import encode_base64

from OpenSSL import crypto
from twisted.internet._sslverify import Certificate, trustRootFromCertificates

from synapse.config._base import Config, ConfigError
from synapse.util import glob_to_regex

logger = logging.getLogger(__name__)

@@ -70,6 +72,53 @@ class TlsConfig(Config):

        self.tls_fingerprints = list(self._original_tls_fingerprints)

        # Whether to verify certificates on outbound federation traffic
        self.federation_verify_certificates = config.get(
            "federation_verify_certificates", False,
        )

        # Whitelist of domains to not verify certificates for
        fed_whitelist_entries = config.get(
            "federation_certificate_verification_whitelist", [],
        )

        # Support globs (*) in whitelist values
        self.federation_certificate_verification_whitelist = []
        for entry in fed_whitelist_entries:
            # Convert globs to regex
            entry_regex = glob_to_regex(entry)
            self.federation_certificate_verification_whitelist.append(entry_regex)

        # List of custom certificate authorities for federation traffic validation
        custom_ca_list = config.get(
            "federation_custom_ca_list", None,
        )

        # Read in and parse custom CA certificates
        self.federation_ca_trust_root = None
        if custom_ca_list is not None:
            if len(custom_ca_list) == 0:
                # A trustroot cannot be generated without any CA certificates.
                # Raise an error if this option has been specified without any
                # corresponding certificates.
                raise ConfigError("federation_custom_ca_list specified without "
                                  "any certificate files")

            certs = []
            for ca_file in custom_ca_list:
                logger.debug("Reading custom CA certificate file: %s", ca_file)
                content = self.read_file(ca_file)

                # Parse the CA certificates
                try:
                    cert_base = Certificate.loadPEM(content)
                    certs.append(cert_base)
                except Exception as e:
                    raise ConfigError("Error parsing custom CA certificate file %s: %s"
                                      % (ca_file, e))

            self.federation_ca_trust_root = trustRootFromCertificates(certs)

        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
@@ -99,15 +148,15 @@ class TlsConfig(Config):
        try:
            with open(self.tls_certificate_file, 'rb') as f:
                cert_pem = f.read()
        except Exception:
            logger.exception("Failed to read existing certificate off disk!")
            raise
        except Exception as e:
            raise ConfigError("Failed to read existing certificate file %s: %s"
                              % (self.tls_certificate_file, e))

        try:
            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
        except Exception:
            logger.exception("Failed to parse existing certificate off disk!")
            raise
        except Exception as e:
            raise ConfigError("Failed to parse existing certificate file %s: %s"
                              % (self.tls_certificate_file, e))

        if not allow_self_signed:
            if tls_certificate.get_subject() == tls_certificate.get_issuer():
@@ -192,6 +241,40 @@ class TlsConfig(Config):
        #
        #tls_private_key_path: "%(tls_private_key_path)s"

        # Whether to verify TLS certificates when sending federation traffic.
        #
        # This currently defaults to `false`, however this will change in
        # Synapse 1.0 when valid federation certificates will be required.
        #
        #federation_verify_certificates: true

        # Skip federation certificate verification on the following whitelist
        # of domains.
        #
        # This setting should only be used in very specific cases, such as
        # federation over Tor hidden services and similar. For private networks
        # of homeservers, you likely want to use a private CA instead.
        #
        # Only effective if federation_verify_certificates is `true`.
        #
        #federation_certificate_verification_whitelist:
        #  - lon.example.com
        #  - *.domain.com
        #  - *.onion

        # List of custom certificate authorities for federation traffic.
        #
        # This setting should only normally be used within a private network of
        # homeservers.
        #
        # Note that this list will replace those that are provided by your
        # operating environment. Certificates must be in PEM format.
        #
        #federation_custom_ca_list:
        #  - myCA1.pem
        #  - myCA2.pem
        #  - myCA3.pem

        # ACME support: This will configure Synapse to request a valid TLS certificate
        # for your configured `server_name` via Let's Encrypt.
        #
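For reference, the glob conversion above means whitelist entries are matched as whole hostnames. A sketch using the same helper (assuming `glob_to_regex` returns a compiled pattern, as its use with `regex.match(host)` below suggests):

```python
from synapse.util import glob_to_regex

# Entries like '*.onion' become anchored regexes matching any such hostname.
pattern = glob_to_regex("*.onion")
print(bool(pattern.match("duskgytldkxiuqc6.onion")))  # True
print(bool(pattern.match("example.com")))             # False
```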
@@ -18,10 +18,10 @@ import logging
from zope.interface import implementer

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
from twisted.python.failure import Failure

logger = logging.getLogger(__name__)

@@ -90,7 +90,7 @@ def _tolerateErrors(wrapped):


@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
class ClientTLSOptionsNoVerify(object):
    """
    Client creator for TLS without certificate identity verification. This is a
    copy of twisted.internet._sslverify.ClientTLSOptions with the identity
@@ -127,9 +127,30 @@ class ClientTLSOptionsFactory(object):
    to remote servers for federation."""

    def __init__(self, config):
        # We don't use config options yet
        self._options = CertificateOptions(verify=False)
        self._config = config
        self._options_noverify = CertificateOptions()

        # Check if we're using a custom list of CA certificates
        trust_root = config.federation_ca_trust_root
        if trust_root is None:
            # Use CA root certs provided by OpenSSL
            trust_root = platformTrust()

        self._options_verify = CertificateOptions(trustRoot=trust_root)

    def get_options(self, host):
        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
        return ClientTLSOptions(host, self._options._makeContext())

        # Check if certificate verification has been enabled
        should_verify = self._config.federation_verify_certificates

        # Check if we've disabled certificate verification for this host
        if should_verify:
            for regex in self._config.federation_certificate_verification_whitelist:
                if regex.match(host):
                    should_verify = False
                    break

        if should_verify:
            return ClientTLSOptions(host, self._options_verify._makeContext())
        return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext())
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2017, 2018 New Vector Ltd.
|
||||
# Copyright 2017, 2018 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,6 +20,7 @@ from collections import namedtuple
|
||||
from six import raise_from
|
||||
from six.moves import urllib
|
||||
|
||||
import nacl.signing
|
||||
from signedjson.key import (
|
||||
decode_verify_key_bytes,
|
||||
encode_verify_key_base64,
|
||||
@@ -55,9 +56,9 @@ from synapse.util.retryutils import NotRetryingDestination
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
VerifyKeyRequest = namedtuple("VerifyRequest", (
|
||||
"server_name", "key_ids", "json_object", "deferred"
|
||||
))
|
||||
VerifyKeyRequest = namedtuple(
|
||||
"VerifyRequest", ("server_name", "key_ids", "json_object", "deferred")
|
||||
)
|
||||
"""
|
||||
A request for a verify key to verify a JSON object.
|
||||
|
||||
@@ -95,9 +96,7 @@ class Keyring(object):

    def verify_json_for_server(self, server_name, json_object):
        return logcontext.make_deferred_yieldable(
            self.verify_json_objects_for_server(
                [(server_name, json_object)]
            )[0]
            self.verify_json_objects_for_server([(server_name, json_object)])[0]
        )

    def verify_json_objects_for_server(self, server_and_json):
@@ -113,40 +112,51 @@ class Keyring(object):
            server_name. The deferreds run their callbacks in the sentinel
            logcontext.
        """
        # a list of VerifyKeyRequests
        verify_requests = []
        handle = preserve_fn(_handle_key_deferred)

        for server_name, json_object in server_and_json:
        def process(server_name, json_object):
            """Process an entry in the request list

            Given a (server_name, json_object) pair from the request list,
            adds a key request to verify_requests, and returns a deferred which will
            complete or fail (in the sentinel context) when verification completes.
            """
            key_ids = signature_ids(json_object, server_name)

            if not key_ids:
                logger.warn("Request from %s: no supported signature keys",
                            server_name)
                deferred = defer.fail(SynapseError(
                    400,
                    "Not signed with a supported algorithm",
                    Codes.UNAUTHORIZED,
                ))
            else:
                deferred = defer.Deferred()
                return defer.fail(
                    SynapseError(
                        400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED
                    )
                )

            logger.debug("Verifying for %s with key_ids %s",
                         server_name, key_ids)
            logger.debug("Verifying for %s with key_ids %s", server_name, key_ids)

            # add the key request to the queue, but don't start it off yet.
            verify_request = VerifyKeyRequest(
                server_name, key_ids, json_object, deferred
                server_name, key_ids, json_object, defer.Deferred()
            )

            verify_requests.append(verify_request)

        run_in_background(self._start_key_lookups, verify_requests)
            # now run _handle_key_deferred, which will wait for the key request
            # to complete and then do the verification.
            #
            # We want _handle_key_request to log to the right context, so we
            # wrap it with preserve_fn (aka run_in_background)
            return handle(verify_request)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified
        handle = preserve_fn(_handle_key_deferred)
        return [
            handle(rq) for rq in verify_requests
        results = [
            process(server_name, json_object)
            for server_name, json_object in server_and_json
        ]

        if verify_requests:
            run_in_background(self._start_key_lookups, verify_requests)

        return results

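The refactored verify_json_objects_for_server separates queueing a request from resolving it: every caller gets a Deferred immediately, and a single background task fires them all later. A stripped-down sketch of that pattern, with hypothetical names and none of the Synapse machinery:

from twisted.internet import defer

def batch_verify(items, start_lookups):
    # Each item gets its own Deferred immediately; a single background task
    # (start_lookups) is then responsible for firing all of them later.
    pending = []

    def process(item):
        d = defer.Deferred()
        pending.append((item, d))
        return d

    results = [process(item) for item in items]
    if pending:
        start_lookups(pending)  # e.g. run_in_background(self._start_key_lookups, ...)
    return results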
    @defer.inlineCallbacks
    def _start_key_lookups(self, verify_requests):
        """Sets off the key fetches for each verify request
@@ -164,15 +174,13 @@ class Keyring(object):
            # any other lookups until we have finished.
            # The deferreds are called with no logcontext.
            server_to_deferred = {
                rq.server_name: defer.Deferred()
                for rq in verify_requests
                rq.server_name: defer.Deferred() for rq in verify_requests
            }

            # We want to wait for any previous lookups to complete before
            # proceeding.
            yield self.wait_for_previous_lookups(
                [rq.server_name for rq in verify_requests],
                server_to_deferred,
                [rq.server_name for rq in verify_requests], server_to_deferred
            )

            # Actually start fetching keys.
@@ -201,9 +209,7 @@ class Keyring(object):
                return res

            for verify_request in verify_requests:
                verify_request.deferred.addBoth(
                    remove_deferreds, verify_request,
                )
                verify_request.deferred.addBoth(remove_deferreds, verify_request)
        except Exception:
            logger.exception("Error starting key lookups")

@@ -233,7 +239,8 @@ class Keyring(object):
                break
            logger.info(
                "Waiting for existing lookups for %s to complete [loop %i]",
                [w[0] for w in wait_on], loop_count,
                [w[0] for w in wait_on],
                loop_count,
            )
            with PreserveLoggingContext():
                yield defer.DeferredList((w[1] for w in wait_on))
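wait_for_previous_lookups, called above, is what serialises lookups per server. The underlying idea can be sketched independently of the Keyring class; key_downloads and the helper below are hypothetical stand-ins:

from twisted.internet import defer

key_downloads = {}  # server_name -> Deferred for the in-flight lookup

@defer.inlineCallbacks
def wait_then_register(server_name, new_lookup_deferred):
    # Wait for any lookup already in flight for this server, then record our
    # own so that later callers queue behind us in the same way.
    previous = key_downloads.get(server_name)
    if previous is not None:
        yield previous
    key_downloads[server_name] = new_lookup_deferred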
@@ -274,10 +281,6 @@ class Keyring(object):
        @defer.inlineCallbacks
        def do_iterations():
            with Measure(self.clock, "get_server_verify_keys"):
                # dict[str, dict[str, VerifyKey]]: results so far.
                # map server_name -> key_id -> VerifyKey
                merged_results = {}

                # dict[str, set(str)]: keys to fetch for each server
                missing_keys = {}
                for verify_request in verify_requests:
@@ -287,29 +290,29 @@ class Keyring(object):

                for fn in key_fetch_fns:
                    results = yield fn(missing_keys.items())
                    merged_results.update(results)

                    # We now need to figure out which verify requests we have keys
                    # for and which we don't
                    missing_keys = {}
                    requests_missing_keys = []
                    for verify_request in verify_requests:
                        server_name = verify_request.server_name
                        result_keys = merged_results[server_name]

                        if verify_request.deferred.called:
                            # We've already called this deferred, which probably
                            # means that we've already found a key for it.
                            continue

                        server_name = verify_request.server_name

                        # see if any of the keys we got this time are sufficient to
                        # complete this VerifyKeyRequest.
                        result_keys = results.get(server_name, {})
                        for key_id in verify_request.key_ids:
                            if key_id in result_keys:
                            key = result_keys.get(key_id)
                            if key:
                                with PreserveLoggingContext():
                                    verify_request.deferred.callback((
                                        server_name,
                                        key_id,
                                        result_keys[key_id],
                                    ))
                                    verify_request.deferred.callback(
                                        (server_name, key_id, key)
                                    )
                                break
                        else:
                            # The else block is only reached if the loop above
@@ -324,13 +327,14 @@ class Keyring(object):

                with PreserveLoggingContext():
                    for verify_request in requests_missing_keys:
                        verify_request.deferred.errback(SynapseError(
                            401,
                            "No key for %s with id %s" % (
                                verify_request.server_name, verify_request.key_ids,
                            ),
                            Codes.UNAUTHORIZED,
                        ))
                        verify_request.deferred.errback(
                            SynapseError(
                                401,
                                "No key for %s with id %s"
                                % (verify_request.server_name, verify_request.key_ids),
                                Codes.UNAUTHORIZED,
                            )
                        )

        def on_err(err):
            with PreserveLoggingContext():
@@ -343,27 +347,24 @@ class Keyring(object):
    @defer.inlineCallbacks
    def get_keys_from_store(self, server_name_and_key_ids):
        """

        Args:
            server_name_and_key_ids (list[(str, iterable[str])]):
            server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]):
                list of (server_name, iterable[key_id]) tuples to fetch keys for

        Returns:
            Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
            Deferred: resolves to dict[str, dict[str, VerifyKey|None]]: map from
                server_name -> key_id -> VerifyKey
        """
        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.store.get_server_verify_keys,
                    server_name, key_ids,
                ).addCallback(lambda ks, server: (server, ks), server_name)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))

        defer.returnValue(dict(res))
        keys_to_fetch = (
            (server_name, key_id)
            for server_name, key_ids in server_name_and_key_ids
            for key_id in key_ids
        )
        res = yield self.store.get_server_verify_keys(keys_to_fetch)
        keys = {}
        for (server_name, key_id), key in res.items():
            keys.setdefault(server_name, {})[key_id] = key
        defer.returnValue(keys)

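The rewritten get_keys_from_store flattens the request into (server_name, key_id) pairs and then folds the store's flat result back into the nested shape callers expect. A self-contained illustration of that regrouping (the sample values are made up; a key may be None if the store has no record of it):

flat = {("example.com", "ed25519:a"): "KEY", ("example.com", "ed25519:b"): None}
keys = {}
for (server_name, key_id), key in flat.items():
    keys.setdefault(server_name, {})[key_id] = key
assert keys == {"example.com": {"ed25519:a": "KEY", "ed25519:b": None}}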
    @defer.inlineCallbacks
    def get_keys_from_perspectives(self, server_name_and_key_ids):
@@ -375,25 +376,26 @@ class Keyring(object):
                )
                defer.returnValue(result)
            except KeyLookupError as e:
                logger.warning(
                    "Key lookup failed from %r: %s", perspective_name, e,
                )
                logger.warning("Key lookup failed from %r: %s", perspective_name, e)
            except Exception as e:
                logger.exception(
                    "Unable to get key from %r: %s %s",
                    perspective_name,
                    type(e).__name__, str(e),
                    type(e).__name__,
                    str(e),
                )

            defer.returnValue({})

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(get_key, p_name, p_keys)
                for p_name, p_keys in self.perspective_servers.items()
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))
        results = yield logcontext.make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(get_key, p_name, p_keys)
                    for p_name, p_keys in self.perspective_servers.items()
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        union_of_keys = {}
        for result in results:
@@ -404,32 +406,30 @@ class Keyring(object):

    @defer.inlineCallbacks
    def get_keys_from_server(self, server_name_and_key_ids):
        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.get_server_verify_key_v2_direct,
                    server_name,
                    key_ids,
                )
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))
        results = yield logcontext.make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.get_server_verify_key_v2_direct, server_name, key_ids
                    )
                    for server_name, key_ids in server_name_and_key_ids
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        merged = {}
        for result in results:
            merged.update(result)

        defer.returnValue({
            server_name: keys
            for server_name, keys in merged.items()
            if keys
        })
        defer.returnValue(
            {server_name: keys for server_name, keys in merged.items() if keys}
        )

    @defer.inlineCallbacks
    def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
                                          perspective_name,
                                          perspective_keys):
    def get_server_verify_key_v2_indirect(
        self, server_names_and_key_ids, perspective_name, perspective_keys
    ):
        # TODO(mark): Set the minimum_valid_until_ts to that needed by
        # the events being validated or the current time if validating
        # an incoming request.
@@ -440,9 +440,7 @@ class Keyring(object):
                data={
                    u"server_keys": {
                        server_name: {
                            key_id: {
                                u"minimum_valid_until_ts": 0
                            } for key_id in key_ids
                            key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids
                        }
                        for server_name, key_ids in server_names_and_key_ids
                    }
@@ -450,21 +448,19 @@ class Keyring(object):
                long_retries=True,
            )
        except (NotRetryingDestination, RequestSendFailed) as e:
            raise_from(
                KeyLookupError("Failed to connect to remote server"), e,
            )
            raise_from(KeyLookupError("Failed to connect to remote server"), e)
        except HttpResponseException as e:
            raise_from(
                KeyLookupError("Remote server returned an error"), e,
            )
            raise_from(KeyLookupError("Remote server returned an error"), e)

        keys = {}

        responses = query_response["server_keys"]

        for response in responses:
            if (u"signatures" not in response
                    or perspective_name not in response[u"signatures"]):
            if (
                u"signatures" not in response
                or perspective_name not in response[u"signatures"]
            ):
                raise KeyLookupError(
                    "Key response not signed by perspective server"
                    " %r" % (perspective_name,)
@@ -474,9 +470,7 @@ class Keyring(object):
            for key_id in response[u"signatures"][perspective_name]:
                if key_id in perspective_keys:
                    verify_signed_json(
                        response,
                        perspective_name,
                        perspective_keys[key_id]
                        response, perspective_name, perspective_keys[key_id]
                    )
                    verified = True

@@ -486,7 +480,7 @@ class Keyring(object):
                    " known key, signed with: %r, known keys: %r",
                    perspective_name,
                    list(response[u"signatures"][perspective_name]),
                    list(perspective_keys)
                    list(perspective_keys),
                )
                raise KeyLookupError(
                    "Response not signed with a known key for perspective"
@@ -494,30 +488,32 @@ class Keyring(object):
                )

            processed_response = yield self.process_v2_response(
                perspective_name, response, only_from_server=False
                perspective_name, response
            )
            server_name = response["server_name"]

            for server_name, response_keys in processed_response.items():
                keys.setdefault(server_name, {}).update(response_keys)
            keys.setdefault(server_name, {}).update(processed_response)

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.store_keys,
                    server_name=server_name,
                    from_server=perspective_name,
                    verify_keys=response_keys,
                )
                for server_name, response_keys in keys.items()
            ],
            consumeErrors=True
        ).addErrback(unwrapFirstError))
        yield logcontext.make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.store_keys,
                        server_name=server_name,
                        from_server=perspective_name,
                        verify_keys=response_keys,
                    )
                    for server_name, response_keys in keys.items()
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        defer.returnValue(keys)

    @defer.inlineCallbacks
    def get_server_verify_key_v2_direct(self, server_name, key_ids):
        keys = {}
        keys = {}  # type: dict[str, nacl.signing.VerifyKey]

        for requested_key_id in key_ids:
            if requested_key_id in keys:
@@ -526,22 +522,27 @@ class Keyring(object):
            try:
                response = yield self.client.get_json(
                    destination=server_name,
                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                    path="/_matrix/key/v2/server/"
                    + urllib.parse.quote(requested_key_id),
                    ignore_backoff=True,
                )
            except (NotRetryingDestination, RequestSendFailed) as e:
                raise_from(
                    KeyLookupError("Failed to connect to remote server"), e,
                )
                raise_from(KeyLookupError("Failed to connect to remote server"), e)
            except HttpResponseException as e:
                raise_from(
                    KeyLookupError("Remote server returned an error"), e,
                )
                raise_from(KeyLookupError("Remote server returned an error"), e)

            if (u"signatures" not in response
                    or server_name not in response[u"signatures"]):
            if (
                u"signatures" not in response
                or server_name not in response[u"signatures"]
            ):
                raise KeyLookupError("Key response not signed by remote server")

            if response["server_name"] != server_name:
                raise KeyLookupError(
                    "Expected a response for server %r not %r"
                    % (server_name, response["server_name"])
                )

            response_keys = yield self.process_v2_response(
                from_server=server_name,
                requested_ids=[requested_key_id],
@@ -550,24 +551,41 @@ class Keyring(object):

            keys.update(response_keys)

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.store_keys,
                    server_name=key_server_name,
                    from_server=server_name,
                    verify_keys=verify_keys,
                )
                for key_server_name, verify_keys in keys.items()
            ],
            consumeErrors=True
        ).addErrback(unwrapFirstError))

        defer.returnValue(keys)
        yield self.store_keys(
            server_name=server_name, from_server=server_name, verify_keys=keys
        )
        defer.returnValue({server_name: keys})

    @defer.inlineCallbacks
    def process_v2_response(self, from_server, response_json,
                            requested_ids=[], only_from_server=True):
    def process_v2_response(self, from_server, response_json, requested_ids=[]):
        """Parse a 'Server Keys' structure from the result of a /key request

        This is used to parse either the entirety of the response from
        GET /_matrix/key/v2/server, or a single entry from the list returned by
        POST /_matrix/key/v2/query.

        Checks that each signature in the response that claims to come from the origin
        server is valid. (Does not check that there actually is such a signature, for
        some reason.)

        Stores the json in server_keys_json so that it can be used for future responses
        to /_matrix/key/v2/query.

        Args:
            from_server (str): the name of the server producing this result: either
                the origin server for a /_matrix/key/v2/server request, or the notary
                for a /_matrix/key/v2/query.

            response_json (dict): the json-decoded Server Keys response object

            requested_ids (iterable[str]): a list of the key IDs that were requested.
                We will store the json for these key ids as well as any that are
                actually in the response

        Returns:
            Deferred[dict[str, nacl.signing.VerifyKey]]:
                map from key_id to key object
        """
        time_now_ms = self.clock.time_msec()
        response_keys = {}
        verify_keys = {}
@@ -589,32 +607,17 @@ class Keyring(object):
                verify_key.time_added = time_now_ms
                old_verify_keys[key_id] = verify_key

        results = {}
        server_name = response_json["server_name"]
        if only_from_server:
            if server_name != from_server:
                raise KeyLookupError(
                    "Expected a response for server %r not %r" % (
                        from_server, server_name
                    )
                )
        for key_id in response_json["signatures"].get(server_name, {}):
            if key_id not in response_json["verify_keys"]:
                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                    "Key response must include verification keys for all" " signatures"
                )
            if key_id in verify_keys:
                verify_signed_json(
                    response_json,
                    server_name,
                    verify_keys[key_id]
                )
                verify_signed_json(response_json, server_name, verify_keys[key_id])

        signed_key_json = sign_json(
            response_json,
            self.config.server_name,
            self.config.signing_key[0],
            response_json, self.config.server_name, self.config.signing_key[0]
        )

        signed_key_json_bytes = encode_canonical_json(signed_key_json)
@@ -627,25 +630,25 @@ class Keyring(object):
        response_keys.update(verify_keys)
        response_keys.update(old_verify_keys)

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.store.store_server_keys_json,
                    server_name=server_name,
                    key_id=key_id,
                    from_server=server_name,
                    ts_now_ms=time_now_ms,
                    ts_expires_ms=ts_valid_until_ms,
                    key_json_bytes=signed_key_json_bytes,
                )
                for key_id in updated_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))
        yield logcontext.make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.store.store_server_keys_json,
                        server_name=server_name,
                        key_id=key_id,
                        from_server=from_server,
                        ts_now_ms=time_now_ms,
                        ts_expires_ms=ts_valid_until_ms,
                        key_json_bytes=signed_key_json_bytes,
                    )
                    for key_id in updated_key_ids
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        results[server_name] = response_keys

        defer.returnValue(results)
        defer.returnValue(response_keys)

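For reference, a hypothetical minimal 'Server Keys' structure of the kind process_v2_response parses; a real response carries unpadded-base64 key material and signatures rather than these placeholder strings:

response_json = {
    "server_name": "example.com",
    "valid_until_ts": 1556668800000,
    "verify_keys": {"ed25519:abc": {"key": "<unpadded-base64-public-key>"}},
    "old_verify_keys": {},
    "signatures": {"example.com": {"ed25519:abc": "<unpadded-base64-signature>"}},
}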
    def store_keys(self, server_name, from_server, verify_keys):
        """Store a collection of verify keys for a given server
@@ -657,16 +660,21 @@ class Keyring(object):
            A deferred that completes when the keys are stored.
        """
        # TODO(markjh): Store whether the keys have expired.
        return logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.store.store_server_verify_key,
                    server_name, server_name, key.time_added, key
                )
                for key_id, key in verify_keys.items()
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))
        return logcontext.make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.store.store_server_verify_key,
                        server_name,
                        server_name,
                        key.time_added,
                        key,
                    )
                    for key_id, key in verify_keys.items()
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )


@defer.inlineCallbacks
@@ -689,17 +697,19 @@ def _handle_key_deferred(verify_request):
    except KeyLookupError as e:
        logger.warn(
            "Failed to download keys for %s: %s %s",
            server_name, type(e).__name__, str(e),
            server_name,
            type(e).__name__,
            str(e),
        )
        raise SynapseError(
            502,
            "Error downloading keys for %s" % (server_name,),
            Codes.UNAUTHORIZED,
            502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED
        )
    except Exception as e:
        logger.exception(
            "Got Exception when downloading keys for %s: %s %s",
            server_name, type(e).__name__, str(e),
            server_name,
            type(e).__name__,
            str(e),
        )
        raise SynapseError(
            401,
@@ -709,22 +719,24 @@ def _handle_key_deferred(verify_request):

    json_object = verify_request.json_object

    logger.debug("Got key %s %s:%s for server %s, verifying" % (
        key_id, verify_key.alg, verify_key.version, server_name,
    ))
    logger.debug(
        "Got key %s %s:%s for server %s, verifying"
        % (key_id, verify_key.alg, verify_key.version, server_name)
    )
    try:
        verify_signed_json(json_object, server_name, verify_key)
    except SignatureVerifyException as e:
        logger.debug(
            "Error verifying signature for %s:%s:%s with key %s: %s",
            server_name, verify_key.alg, verify_key.version,
            server_name,
            verify_key.alg,
            verify_key.version,
            encode_verify_key_base64(verify_key),
            str(e),
        )
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s: %s" % (
                server_name, verify_key.alg, verify_key.version, str(e),
            ),
            "Invalid signature for server %s with key %s:%s: %s"
            % (server_name, verify_key.alg, verify_key.version, str(e)),
            Codes.UNAUTHORIZED,
        )

@@ -21,6 +21,7 @@ import six

from unpaddedbase64 import encode_base64

from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -335,13 +336,32 @@ class FrozenEventV2(EventBase):
        return self.__repr__()

    def __repr__(self):
        return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
        return "<%s event_id='%s', type='%s', state_key='%s'>" % (
            self.__class__.__name__,
            self.event_id,
            self.get("type", None),
            self.get("state_key", None),
        )


class FrozenEventV3(FrozenEventV2):
    """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
    format_version = EventFormatVersions.V3  # All events of this type are V3

    @property
    def event_id(self):
        # We have to import this here as otherwise we get an import loop which
        # is hard to break.
        from synapse.crypto.event_signing import compute_event_reference_hash

        if self._event_id:
            return self._event_id
        self._event_id = "$" + encode_base64(
            compute_event_reference_hash(self)[1], urlsafe=True
        )
        return self._event_id


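The event_id property above derives the identifier from the event's reference hash instead of a server-assigned string. A toy illustration of the same encoding, assuming a SHA-256 reference hash over some bytes (the real hash covers the canonical JSON of the redacted event):

import hashlib

from unpaddedbase64 import encode_base64

def make_v3_style_event_id(reference_bytes):
    # URL-safe, unpadded base64 of a SHA-256 digest, prefixed with "$".
    digest = hashlib.sha256(reference_bytes).digest()
    return "$" + encode_base64(digest, urlsafe=True)

print(make_v3_style_event_id(b"example"))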
def room_version_to_event_format(room_version):
    """Converts a room version string to the event format

@@ -350,12 +370,15 @@ def room_version_to_event_format(room_version):

    Returns:
        int

    Raises:
        UnsupportedRoomVersionError if the room version is unknown
    """
    v = KNOWN_ROOM_VERSIONS.get(room_version)

    if not v:
        # We should have already checked version, so this should not happen
        raise RuntimeError("Unrecognized room version %s" % (room_version,))
        # this can happen if support is withdrawn for a room version
        raise UnsupportedRoomVersionError()

    return v.event_format

@@ -376,6 +399,8 @@ def event_type_from_format_version(format_version):
        return FrozenEvent
    elif format_version == EventFormatVersions.V2:
        return FrozenEventV2
    elif format_version == EventFormatVersions.V3:
        return FrozenEventV3
    else:
        raise Exception(
            "No event format %r" % (format_version,)

@@ -18,6 +18,7 @@ import attr
from twisted.internet import defer

from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import (
    KNOWN_EVENT_FORMAT_VERSIONS,
    KNOWN_ROOM_VERSIONS,
@@ -178,9 +179,8 @@ class EventBuilderFactory(object):
        """
        v = KNOWN_ROOM_VERSIONS.get(room_version)
        if not v:
            raise Exception(
                "No event format defined for version %r" % (room_version,)
            )
            # this can happen if support is withdrawn for a room version
            raise UnsupportedRoomVersionError()
        return self.for_room_version(v, key_values)

    def for_room_version(self, room_version, key_values):

@@ -187,7 +187,9 @@ class EventContext(object):

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
            is None, which happens when the associated event is an outlier.
                is None, which happens when the associated event is an outlier.
                Maps a (type, state_key) to the event ID of the state event matching
                this tuple.
        """

        if not self._fetching_state_deferred:
@@ -205,7 +207,9 @@ class EventContext(object):

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
            is None, which happens when the associated event is an outlier.
                is None, which happens when the associated event is an outlier.
                Maps a (type, state_key) to the event ID of the state event matching
                this tuple.
        """

        if not self._fetching_state_deferred:

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd.
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -19,7 +19,10 @@ from six import string_types

from frozendict import frozendict

from synapse.api.constants import EventTypes
from twisted.internet import defer

from synapse.api.constants import EventTypes, RelationTypes
from synapse.util.async_helpers import yieldable_gather_results

from . import EventBase

@@ -311,3 +314,92 @@ def serialize_event(e, time_now_ms, as_client_event=True,
        d = only_fields(d, only_event_fields)

    return d


class EventClientSerializer(object):
    """Serializes events that are to be sent to clients.

    This is used for bundling extra information with any events to be sent to
    clients.
    """

    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.experimental_msc1849_support_enabled = (
            hs.config.experimental_msc1849_support_enabled
        )

    @defer.inlineCallbacks
    def serialize_event(self, event, time_now, **kwargs):
        """Serializes a single event.

        Args:
            event (EventBase)
            time_now (int): The current time in milliseconds
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            Deferred[dict]: The serialized event
        """
        # To handle the case of presence events and the like
        if not isinstance(event, EventBase):
            defer.returnValue(event)

        event_id = event.event_id
        serialized_event = serialize_event(event, time_now, **kwargs)

        # If MSC1849 is enabled then we need to look if there are any relations
        # we need to bundle in with the event
        if self.experimental_msc1849_support_enabled:
            annotations = yield self.store.get_aggregation_groups_for_event(
                event_id,
            )
            references = yield self.store.get_relations_for_event(
                event_id, RelationTypes.REFERENCE, direction="f",
            )

            if annotations.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.ANNOTATION] = annotations.to_dict()

            if references.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REFERENCE] = references.to_dict()

            edit = None
            if event.type == EventTypes.Message:
                edit = yield self.store.get_applicable_edit(event_id)

            if edit:
                # If there is an edit replace the content, preserving existing
                # relations.

                relations = event.content.get("m.relates_to")
                serialized_event["content"] = edit.content.get("m.new_content", {})
                if relations:
                    serialized_event["content"]["m.relates_to"] = relations
                else:
                    serialized_event["content"].pop("m.relates_to", None)

                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REPLACE] = {
                    "event_id": edit.event_id,
                }

        defer.returnValue(serialized_event)

    def serialize_events(self, events, time_now, **kwargs):
        """Serializes multiple events.

        Args:
            event (iter[EventBase])
            time_now (int): The current time in milliseconds
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            Deferred[list[dict]]: The list of serialized events
        """
        return yieldable_gather_results(
            self.serialize_event, events,
            time_now=time_now, **kwargs
        )

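To make the serializer's output concrete, here is a hypothetical sketch of the shape it produces: bundled aggregations live under unsigned["m.relations"], keyed by relation type (all field values below are invented):

serialized_event = {
    "content": {"body": "hello"},
    "unsigned": {
        "m.relations": {
            # from get_aggregation_groups_for_event
            "m.annotation": {"chunk": [{"type": "m.reaction", "key": "+1", "count": 3}]},
            # from get_applicable_edit; the event's content is also replaced
            # with the edit's m.new_content
            "m.replace": {"event_id": "$edit_event_id"},
        }
    },
}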
@@ -15,8 +15,8 @@

from six import string_types

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions
from synapse.types import EventID, RoomID, UserID

@@ -56,6 +56,17 @@ class EventValidator(object):
            if not isinstance(getattr(event, s), string_types):
                raise SynapseError(400, "'%s' not a string type" % (s,))

        if event.type == EventTypes.Aliases:
            if "aliases" in event.content:
                for alias in event.content["aliases"]:
                    if len(alias) > MAX_ALIAS_LENGTH:
                        raise SynapseError(
                            400,
                            ("Can't create aliases longer than"
                             " %d characters" % (MAX_ALIAS_LENGTH,)),
                            Codes.INVALID_PARAM,
                        )

    def validate_builder(self, event):
        """Validates that the builder/event has roughly the right format. Only
        checks values that we expect a proto event to have, rather than all the

@@ -269,7 +269,18 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
        for p in pdus_to_check_sender
    ])

    def sender_err(e, pdu_to_check):
        errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
            pdu_to_check.pdu.event_id,
            pdu_to_check.sender_domain,
            e.getErrorMessage(),
        )
        # XX not really sure if these are the right codes, but they are what
        # we've done for ages
        raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)

    for p, d in zip(pdus_to_check_sender, more_deferreds):
        d.addErrback(sender_err, p)
        p.deferreds.append(d)

    # now let's look for events where the sender's domain is different to the
@@ -291,7 +302,18 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
        for p in pdus_to_check_event_id
    ])

    def event_err(e, pdu_to_check):
        errmsg = (
            "event id %s: unable to verify signature for event id domain: %s" % (
                pdu_to_check.pdu.event_id,
                e.getErrorMessage(),
            )
        )
        # XX as above: not really sure if these are the right codes
        raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)

    for p, d in zip(pdus_to_check_event_id, more_deferreds):
        d.addErrback(event_err, p)
        p.deferreds.append(d)

    # replace lists of deferreds with single Deferreds

@@ -33,6 +33,7 @@ from synapse.api.errors import (
    IncompatibleRoomVersionError,
    NotFoundError,
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.crypto.event_signing import compute_event_signature
@@ -198,11 +199,22 @@ class FederationServer(FederationBase):

            try:
                room_version = yield self.store.get_room_version(room_id)
                format_ver = room_version_to_event_format(room_version)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            try:
                format_ver = room_version_to_event_format(room_version)
            except UnsupportedRoomVersionError:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info(
                    "Ignoring PDU for room %s with unknown version %s",
                    room_id,
                    room_version,
                )
                continue

            event = event_from_pdu_json(p, format_ver)
            pdus_by_room.setdefault(room_id, []).append(event)


@@ -33,12 +33,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage import UserPresenceState
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

# This is defined in the Matrix spec and enforced by the receiver.
MAX_EDUS_PER_TRANSACTION = 100

logger = logging.getLogger(__name__)


sent_edus_counter = Counter(
    "synapse_federation_client_sent_edus",
    "Total number of EDUs successfully sent",
    "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent"
)

sent_edus_by_type = Counter(
@@ -58,6 +60,7 @@ class PerDestinationQueue(object):
        destination (str): the server_name of the destination that we are managing
            transmission for.
    """

    def __init__(self, hs, transaction_manager, destination):
        self._server_name = hs.hostname
        self._clock = hs.get_clock()
@@ -68,17 +71,17 @@ class PerDestinationQueue(object):
        self.transmission_loop_running = False

        # a list of tuples of (pending pdu, order)
        self._pending_pdus = [] # type: list[tuple[EventBase, int]]
        self._pending_edus = [] # type: list[Edu]
        self._pending_pdus = []  # type: list[tuple[EventBase, int]]
        self._pending_edus = []  # type: list[Edu]

        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
        # based on their key (e.g. typing events by room_id)
        # Map of (edu_type, key) -> Edu
        self._pending_edus_keyed = {} # type: dict[tuple[str, str], Edu]
        self._pending_edus_keyed = {}  # type: dict[tuple[str, str], Edu]

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence = {} # type: dict[str, UserPresenceState]
        self._pending_presence = {}  # type: dict[str, UserPresenceState]

        # room_id -> receipt_type -> user_id -> receipt_dict
        self._pending_rrs = {}
@@ -120,9 +123,7 @@ class PerDestinationQueue(object):
        Args:
            states (iterable[UserPresenceState]): presence to send
        """
        self._pending_presence.update({
            state.user_id: state for state in states
        })
        self._pending_presence.update({state.user_id: state for state in states})
        self.attempt_new_transaction()

    def queue_read_receipt(self, receipt):
@@ -132,14 +133,9 @@ class PerDestinationQueue(object):
        Args:
            receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
        """
        self._pending_rrs.setdefault(
            receipt.room_id, {},
        ).setdefault(
        self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
            receipt.receipt_type, {}
        )[receipt.user_id] = {
            "event_ids": receipt.event_ids,
            "data": receipt.data,
        }
        )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}

    def flush_read_receipts_for_room(self, room_id):
        # if we don't have any read-receipts for this room, it may be that we've already
@@ -170,10 +166,7 @@ class PerDestinationQueue(object):
            # request at which point pending_pdus just keeps growing.
            # we need application-layer timeouts of some flavour of these
            # requests
            logger.debug(
                "TX [%s] Transaction already in progress",
                self._destination
            )
            logger.debug("TX [%s] Transaction already in progress", self._destination)
            return

        logger.debug("TX [%s] Starting transaction loop", self._destination)
@@ -197,7 +190,8 @@ class PerDestinationQueue(object):
            pending_pdus = []
            while True:
                device_message_edus, device_stream_id, dev_list_id = (
                    yield self._get_new_device_messages()
                    # We have to keep 2 free slots for presence and rr_edus
                    yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2)
                )

                # BEGIN CRITICAL SECTION
@@ -216,19 +210,9 @@ class PerDestinationQueue(object):

                pending_edus = []

                pending_edus.extend(self._get_rr_edus(force_flush=False))

                # We can only include at most 100 EDUs per transaction
                pending_edus.extend(self._pop_pending_edus(100 - len(pending_edus)))

                pending_edus.extend(
                    self._pending_edus_keyed.values()
                )

                self._pending_edus_keyed = {}

                pending_edus.extend(device_message_edus)

                # rr_edus and pending_presence take at most one slot each
                pending_edus.extend(self._get_rr_edus(force_flush=False))
                pending_presence = self._pending_presence
                self._pending_presence = {}
                if pending_presence:
@@ -248,9 +232,23 @@ class PerDestinationQueue(object):
                    )
                )

                pending_edus.extend(device_message_edus)
                pending_edus.extend(
                    self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
                )
                while (
                    len(pending_edus) < MAX_EDUS_PER_TRANSACTION
                    and self._pending_edus_keyed
                ):
                    _, val = self._pending_edus_keyed.popitem()
                    pending_edus.append(val)

                if pending_pdus:
                    logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                                 self._destination, len(pending_pdus))
                    logger.debug(
                        "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                        self._destination,
                        len(pending_pdus),
                    )

                if not pending_pdus and not pending_edus:
                    logger.debug("TX [%s] Nothing to send", self._destination)
@@ -259,7 +257,7 @@ class PerDestinationQueue(object):

                # if we've decided to send a transaction anyway, and we have room, we
                # may as well send any pending RRs
                if len(pending_edus) < 100:
                if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
                    pending_edus.extend(self._get_rr_edus(force_flush=True))

                # END CRITICAL SECTION
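The critical section above spends a fixed EDU budget in a deliberate order. A minimal sketch of the same bookkeeping, assuming the device EDUs were fetched with two slots held back as in the code above (function and parameter names are hypothetical):

MAX_EDUS_PER_TRANSACTION = 100  # per the Matrix spec, as noted above

def assemble_edus(device_edus, rr_edu, presence_edu, queued_edus, keyed_edus):
    # device_edus is assumed to have been fetched with a budget of
    # MAX_EDUS_PER_TRANSACTION - 2, leaving one slot each for the read
    # receipt and presence EDUs.
    edus = list(device_edus)
    edus.extend(e for e in (rr_edu, presence_edu) if e is not None)
    budget = MAX_EDUS_PER_TRANSACTION - len(edus)
    edus.extend(queued_edus[:budget])
    # keyed EDUs fill whatever room is left, one at a time
    while len(edus) < MAX_EDUS_PER_TRANSACTION and keyed_edus:
        edus.append(keyed_edus.pop())
    assert len(edus) <= MAX_EDUS_PER_TRANSACTION
    return edus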
@@ -303,22 +301,25 @@ class PerDestinationQueue(object):
            except HttpResponseException as e:
                logger.warning(
                    "TX [%s] Received %d response to transaction: %s",
                    self._destination, e.code, e,
                    self._destination,
                    e.code,
                    e,
                )
            except RequestSendFailed as e:
                logger.warning("TX [%s] Failed to send transaction: %s", self._destination, e)
                logger.warning(
                    "TX [%s] Failed to send transaction: %s", self._destination, e
                )

                for p, _ in pending_pdus:
                    logger.info("Failed to send event %s to %s", p.event_id,
                                self._destination)
                    logger.info(
                        "Failed to send event %s to %s", p.event_id, self._destination
                    )
            except Exception:
                logger.exception(
                    "TX [%s] Failed to send transaction",
                    self._destination,
                )
                logger.exception("TX [%s] Failed to send transaction", self._destination)
                for p, _ in pending_pdus:
                    logger.info("Failed to send event %s to %s", p.event_id,
                                self._destination)
                    logger.info(
                        "Failed to send event %s to %s", p.event_id, self._destination
                    )
            finally:
                # We want to be *very* sure we clear this after we stop processing
                self.transmission_loop_running = False
@@ -346,27 +347,13 @@ class PerDestinationQueue(object):
        return pending_edus

    @defer.inlineCallbacks
    def _get_new_device_messages(self):
        last_device_stream_id = self._last_device_stream_id
        to_device_stream_id = self._store.get_to_device_stream_token()
        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
            self._destination, last_device_stream_id, to_device_stream_id
        )
        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        ]

    def _get_new_device_messages(self, limit):
        last_device_list = self._last_device_list_stream_id
        # Will return at most 20 entries
        now_stream_id, results = yield self._store.get_devices_by_remote(
            self._destination, last_device_list
        )
        edus.extend(
        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
@@ -374,5 +361,26 @@ class PerDestinationQueue(object):
                content=content,
            )
            for content in results
        ]

        assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"

        last_device_stream_id = self._last_device_stream_id
        to_device_stream_id = self._store.get_to_device_stream_token()
        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
            self._destination,
            last_device_stream_id,
            to_device_stream_id,
            limit - len(edus),
        )
        edus.extend(
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        )

        defer.returnValue((edus, stream_id, now_stream_id))

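The two-stage fetch in _get_new_device_messages means to-device messages only get whatever budget the device-list updates leave over. A toy version of the arithmetic (all values hypothetical):

limit = 98                    # MAX_EDUS_PER_TRANSACTION - 2, as passed in above
device_list_edus = 20         # get_devices_by_remote returns at most 20 entries
remaining = limit - device_list_edus
assert remaining == 78        # budget left for m.direct_to_device EDUs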
@@ -63,11 +63,7 @@ class TransportLayerServer(JsonResource):
        self.authenticator = Authenticator(hs)
        self.ratelimiter = FederationRateLimiter(
            self.clock,
            window_size=hs.config.federation_rc_window_size,
            sleep_limit=hs.config.federation_rc_sleep_limit,
            sleep_msec=hs.config.federation_rc_sleep_delay,
            reject_limit=hs.config.federation_rc_reject_limit,
            concurrent_requests=hs.config.federation_rc_concurrent,
            config=hs.config.rc_federation,
        )

        self.register_servlets()
@@ -716,8 +712,17 @@ class PublicRoomList(BaseFederationServlet):

    PATH = "/publicRooms"

    def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access):
        super(PublicRoomList, self).__init__(
            handler, authenticator, ratelimiter, server_name,
        )
        self.deny_access = deny_access

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query):
        if self.deny_access:
            raise FederationDeniedError(origin)

        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
@@ -1417,6 +1422,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N
        authenticator=authenticator,
        ratelimiter=ratelimiter,
        server_name=hs.hostname,
        deny_access=hs.config.restrict_public_rooms_to_local_users,
    ).register(resource)

    if "group_server" in servlet_groups:

@@ -90,8 +90,8 @@ class BaseHandler(object):
            messages_per_second = override.messages_per_second
            burst_count = override.burst_count
        else:
            messages_per_second = self.hs.config.rc_messages_per_second
            burst_count = self.hs.config.rc_message_burst_count
            messages_per_second = self.hs.config.rc_message.per_second
            burst_count = self.hs.config.rc_message.burst_count

        allowed, time_allowed = self.ratelimiter.can_do_action(
            user_id, time_now,

synapse/handlers/account_validity.py (new file, 253 lines)
@@ -0,0 +1,253 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import email.mime.multipart
import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from twisted.internet import defer

from synapse.api.errors import StoreError
from synapse.types import UserID
from synapse.util import stringutils
from synapse.util.logcontext import make_deferred_yieldable

try:
    from synapse.push.mailer import load_jinja2_templates
except ImportError:
    load_jinja2_templates = None

logger = logging.getLogger(__name__)


class AccountValidityHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.store = self.hs.get_datastore()
        self.sendmail = self.hs.get_sendmail()
        self.clock = self.hs.get_clock()

        self._account_validity = self.hs.config.account_validity

        if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
            # Don't do email-specific configuration if renewal by email is disabled.
            try:
                app_name = self.hs.config.email_app_name

                self._subject = self._account_validity.renew_email_subject % {
                    "app": app_name,
                }

                self._from_string = self.hs.config.email_notif_from % {
                    "app": app_name,
                }
            except Exception:
                # If substitution failed, fall back to the bare strings.
                self._subject = self._account_validity.renew_email_subject
                self._from_string = self.hs.config.email_notif_from

            self._raw_from = email.utils.parseaddr(self._from_string)[1]

            self._template_html, self._template_text = load_jinja2_templates(
                config=self.hs.config,
                template_html_name=self.hs.config.email_expiry_template_html,
                template_text_name=self.hs.config.email_expiry_template_text,
            )

            # Check the renewal emails to send and send them every 30min.
            self.clock.looping_call(
                self.send_renewal_emails,
                30 * 60 * 1000,
            )

    @defer.inlineCallbacks
    def send_renewal_emails(self):
        """Gets the list of users whose account is expiring in the amount of time
        configured in the ``renew_at`` parameter from the ``account_validity``
        configuration, and sends renewal emails to all of these users as long as they
        have an email 3PID attached to their account.
        """
        expiring_users = yield self.store.get_users_expiring_soon()

        if expiring_users:
            for user in expiring_users:
                yield self._send_renewal_email(
                    user_id=user["user_id"],
                    expiration_ts=user["expiration_ts_ms"],
                )

    @defer.inlineCallbacks
    def send_renewal_email_to_user(self, user_id):
        expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
        yield self._send_renewal_email(user_id, expiration_ts)

    @defer.inlineCallbacks
    def _send_renewal_email(self, user_id, expiration_ts):
        """Sends out a renewal email to every email address attached to the given user
        with a unique link allowing them to renew their account.

        Args:
            user_id (str): ID of the user to send email(s) to.
            expiration_ts (int): Timestamp in milliseconds for the expiration date of
                this user's account (used in the email templates).
        """
        addresses = yield self._get_email_addresses_for_user(user_id)

        # Stop right here if the user doesn't have at least one email address.
        # In this case, they will have to ask their server admin to renew their
        # account manually.
        if not addresses:
            return

        try:
            user_display_name = yield self.store.get_profile_displayname(
                UserID.from_string(user_id).localpart
            )
            if user_display_name is None:
                user_display_name = user_id
        except StoreError:
            user_display_name = user_id

        renewal_token = yield self._get_renewal_token(user_id)
        url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
            self.hs.config.public_baseurl,
            renewal_token,
        )

        template_vars = {
            "display_name": user_display_name,
            "expiration_ts": expiration_ts,
            "url": url,
        }

        html_text = self._template_html.render(**template_vars)
        html_part = MIMEText(html_text, "html", "utf8")

        plain_text = self._template_text.render(**template_vars)
        text_part = MIMEText(plain_text, "plain", "utf8")

        for address in addresses:
            raw_to = email.utils.parseaddr(address)[1]

            multipart_msg = MIMEMultipart('alternative')
            multipart_msg['Subject'] = self._subject
            multipart_msg['From'] = self._from_string
            multipart_msg['To'] = address
            multipart_msg['Date'] = email.utils.formatdate()
            multipart_msg['Message-ID'] = email.utils.make_msgid()
            multipart_msg.attach(text_part)
            multipart_msg.attach(html_part)

            logger.info("Sending renewal email to %s", address)

            yield make_deferred_yieldable(self.sendmail(
                self.hs.config.email_smtp_host,
                self._raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
                reactor=self.hs.get_reactor(),
                port=self.hs.config.email_smtp_port,
                requireAuthentication=self.hs.config.email_smtp_user is not None,
                username=self.hs.config.email_smtp_user,
                password=self.hs.config.email_smtp_pass,
                requireTransportSecurity=self.hs.config.require_transport_security
            ))

        yield self.store.set_renewal_mail_status(
            user_id=user_id,
            email_sent=True,
        )

    @defer.inlineCallbacks
    def _get_email_addresses_for_user(self, user_id):
        """Retrieve the list of email addresses attached to a user's account.

        Args:
            user_id (str): ID of the user to lookup email addresses for.

        Returns:
            defer.Deferred[list[str]]: Email addresses for this account.
        """
        threepids = yield self.store.user_get_threepids(user_id)

        addresses = []
        for threepid in threepids:
            if threepid["medium"] == "email":
                addresses.append(threepid["address"])

        defer.returnValue(addresses)

    @defer.inlineCallbacks
    def _get_renewal_token(self, user_id):
        """Generates a 32-byte long random string that will be inserted into the
        user's renewal email's unique link, then saves it into the database.

        Args:
            user_id (str): ID of the user to generate a string for.

        Returns:
            defer.Deferred[str]: The generated string.

        Raises:
            StoreError(500): Couldn't generate a unique string after 5 attempts.
        """
        attempts = 0
        while attempts < 5:
            try:
                renewal_token = stringutils.random_string(32)
                yield self.store.set_renewal_token_for_user(user_id, renewal_token)
                defer.returnValue(renewal_token)
            except StoreError:
                attempts += 1
        raise StoreError(500, "Couldn't generate a unique string as refresh string.")

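_get_renewal_token's loop is a generic retry-until-unique pattern. A standalone sketch under the assumption that the persistence callable signals a collision by raising (the names below are hypothetical):

import random
import string

def generate_unique_token(save, length=32, max_attempts=5):
    # save() is a hypothetical callable that raises ValueError when the token
    # already exists, mirroring the store call in _get_renewal_token above.
    for _ in range(max_attempts):
        token = "".join(random.choice(string.ascii_letters) for _ in range(length))
        try:
            save(token)
            return token
        except ValueError:
            continue
    raise RuntimeError("Couldn't generate a unique token in %d attempts" % max_attempts)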
    @defer.inlineCallbacks
    def renew_account(self, renewal_token):
        """Renews the account attached to a given renewal token by pushing back the
        expiration date by the current validity period in the server's configuration.

        Args:
            renewal_token (str): Token sent with the renewal request.
        """
        user_id = yield self.store.get_user_from_renewal_token(renewal_token)
        logger.debug("Renewing an account for user %s", user_id)
        yield self.renew_account_for_user(user_id)

    @defer.inlineCallbacks
    def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
        """Renews the account attached to a given user by pushing back the
        expiration date by the current validity period in the server's
        configuration.

        Args:
            user_id (str): ID of the user whose account to renew.
            expiration_ts (int): New expiration date. Defaults to now + validity period.
            email_sent (bool): Whether an email has been sent for this validity period.
                Defaults to False.

        Returns:
            defer.Deferred[int]: New expiration date for this account, as a timestamp
                in milliseconds since epoch.
        """
        if expiration_ts is None:
            expiration_ts = self.clock.time_msec() + self._account_validity.period

        yield self.store.set_account_validity_for_user(
            user_id=user_id,
            expiration_ts=expiration_ts,
            email_sent=email_sent,
        )

        defer.returnValue(expiration_ts)
Some files were not shown because too many files have changed in this diff.