Compare commits
365 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 9c1b83b007 |  |
|  | 8f6d9c4cf0 |  |
|  | 0001e8397e |  |
|  | 197b08de35 |  |
|  | 099c96b89b |  |
|  | 2fb7794e60 |  |
|  | bbe39f808c |  |
|  | 880aaac1d8 |  |
|  | abf1e5c526 |  |
|  | 0d0bc35792 |  |
|  | 5e4a438556 |  |
|  | 71d65407e7 |  |
|  | fa64f836ec |  |
|  | 5a5abd55e8 |  |
|  | 603618c002 |  |
|  | 709e81f518 |  |
|  | a0a1fd0bec |  |
|  | b58d17e44f |  |
|  | 771d70e89c |  |
|  | f31a94a6dd |  |
|  | 61b457e3ec |  |
|  | adfaea8c69 |  |
|  | 3f1cd14791 |  |
|  | a0d2f9d089 |  |
|  | d484126bf7 |  |
|  | 8a380d0fe2 |  |
|  | 818def8248 |  |
|  | 9801a042f3 |  |
|  | bfbe2f5b08 |  |
|  | 7a782c32a2 |  |
|  | b1255077f5 |  |
|  | d009535639 |  |
|  | ba7a523854 |  |
|  | e837be5b5c |  |
|  | 3c67eee6dc |  |
|  | fe3941f6e3 |  |
|  | 8ee0d74516 |  |
|  | 3be2abd0a9 |  |
|  | bc831d1d9a |  |
|  | 0a714c3abf |  |
|  | 7718fabb7a |  |
|  | fd6d83ed96 |  |
|  | d2455ec3aa |  |
|  | 3404ad289b |  |
|  | 46fa66bbfd |  |
|  | 10027c80b0 |  |
|  | 5a78f47f6e |  |
|  | 9551911f88 |  |
|  | 43b2be9764 |  |
|  | 32873efa87 |  |
|  | 97a42bbc3a |  |
|  | 02e89021f5 |  |
|  | 49f877d32e |  |
|  | ffe1fc111d |  |
|  | 79460ce9c9 |  |
|  | 71cc6bab5f |  |
|  | 36af094017 |  |
|  | 65bdc35a1f |  |
|  | df1c98c22a |  |
|  | f3f142259e |  |
|  | 0cb83cde70 |  |
|  | ef9c275d96 |  |
|  | 12bbcc255a |  |
|  | 5820ed905f |  |
|  | 361de49c90 |  |
|  | f48bf4febd |  |
|  | dc3f998706 |  |
|  | 862669d6cc |  |
|  | 459d089af7 |  |
|  | e88a5dd108 |  |
|  | e45a7c0939 |  |
|  | f092029d2d |  |
|  | 6cd34da8b1 |  |
|  | d8994942f2 |  |
|  | 08e050c3fd |  |
|  | 47acbc519f |  |
|  | d9239b5257 |  |
|  | 7b8d654a61 |  |
|  | fdb816713a |  |
|  | 3dd2b5f5e3 |  |
|  | ba547ec3a9 |  |
|  | a0c4769f1a |  |
|  | 6b21986e4e |  |
|  | 705c978366 |  |
|  | a443d2a25d |  |
|  | 88d41e94f5 |  |
|  | 856b2a9555 |  |
|  | 78d170262c |  |
|  | aa7e4291ee |  |
|  | 9e45d573d4 |  |
|  | 605cd089f7 |  |
|  | 3edc65dd24 |  |
|  | a92e703ab9 |  |
|  | 01209382fb |  |
|  | 3a3118f4ec |  |
|  | db0fee738d |  |
|  | 3de57e7062 |  |
|  | 8e64c5a24c |  |
|  | cc0800ebfc |  |
|  | fe73f0d533 |  |
|  | 21db35f77e |  |
|  | e1d858984d |  |
|  | 799001f2c0 |  |
|  | b08b0a22d5 |  |
|  | de2d267375 |  |
|  | 56ca93ef59 |  |
|  | f4884444c3 |  |
|  | e1b240329e |  |
|  | 7765bf3989 |  |
|  | 928edef979 |  |
|  | b0c8bdd49d |  |
|  | bce557175b |  |
|  | 99fcc96289 |  |
|  | ed630ea17c |  |
|  | 9bcd37146e |  |
|  | 2201ef8556 |  |
|  | f663118155 |  |
|  | b5176166b7 |  |
|  | 4a50b674f2 |  |
|  | 6a7e90ad78 |  |
|  | f0561fcffd |  |
|  | 5e019069ab |  |
|  | 39c2d26e0b |  |
|  | ff70ec0a00 |  |
|  | ee0525b2b2 |  |
|  | f84700fba8 |  |
|  | 577f460369 |  |
|  | 6bbd890f05 |  |
|  | 146fec0820 |  |
|  | a58860e480 |  |
|  | 60d0672426 |  |
|  | a831d2e4e3 |  |
|  | d88e0ec080 |  |
|  | 6475382d80 |  |
|  | 74bf3fdbb9 |  |
|  | c87572d6e4 |  |
|  | 5ef91b96f1 |  |
|  | c7d6d5c69e |  |
|  | 245ee14220 |  |
|  | 23d8a55c7a |  |
|  | ea23210b2d |  |
|  | 4b4536dd02 |  |
|  | 6deeefb68c |  |
|  | abadf44eb2 |  |
|  | e88b90aaeb |  |
|  | 638001116d |  |
|  | 3960527c2e |  |
|  | ad09ee9262 |  |
|  | 1330c311b7 |  |
|  | a46fabf17b |  |
|  | 8af9f11bea |  |
|  | 3f11cbb404 |  |
|  | 24d814ca23 |  |
|  | d73683c363 |  |
|  | 0cb0c7bcd5 |  |
|  | 0536d0c9be |  |
|  | 5d17c31596 |  |
|  | e81c093974 |  |
|  | b9391c9575 |  |
|  | ae5b3104f0 |  |
|  | e49eb1a886 |  |
|  | f64c96662e |  |
|  | 52642860da |  |
|  | 814cc00cb9 |  |
|  | 05299599b6 |  |
|  | 3b7e0e002b |  |
|  | 4286e429a7 |  |
|  | c3f296af32 |  |
|  | dbdf843012 |  |
|  | ebd6a15af3 |  |
|  | 94f7b4cd54 |  |
|  | 863087d186 |  |
|  | 957129f4a7 |  |
|  | 0d5f2f4bb0 |  |
|  | a25ddf26a3 |  |
|  | bc9b75c6f0 |  |
|  | 8033b257a7 |  |
|  | 1cdc253e0a |  |
|  | c556ed9e15 |  |
|  | 6e89ec5e32 |  |
|  | d184cbc031 |  |
|  | 98681f90cb |  |
|  | af8ba6b525 |  |
|  | 7571bf86f0 |  |
|  | b3e44f0bdf |  |
|  | 370080531e |  |
|  | b0d112e78b |  |
|  | 68ef7ebbef |  |
|  | 0f8ffa38b5 |  |
|  | ac0d45b78b |  |
|  | 83b0ea047b |  |
|  | 7f93eb1903 |  |
|  | a5afdd15e5 |  |
|  | 160522e32c |  |
|  | f6fa2c0b31 |  |
|  | 08f41a6f05 |  |
|  | d7bf793cc1 |  |
|  | 7d846e8704 |  |
|  | 540c5e168b |  |
|  | 2a81393a4b |  |
|  | 54f3f369bd |  |
|  | ef6bdafb29 |  |
|  | 46a446828d |  |
|  | e0992fcc5b |  |
|  | 184303b865 |  |
|  | 57ad702af0 |  |
|  | b660327056 |  |
|  | c3d4ad8afd |  |
|  | a5bab2d058 |  |
|  | c80a9fe13d |  |
|  | 5a246611e3 |  |
|  | a855b7c3a8 |  |
|  | 281551f720 |  |
|  | 750d4d7599 |  |
|  | dcd85b976d |  |
|  | b36095ae5c |  |
|  | ee42a5513e |  |
|  | 6b9e1014cf |  |
|  | 611215a49c |  |
|  | 2cad8baa70 |  |
|  | fcfb591b31 |  |
|  | cc109b79dd |  |
|  | a1f307f7d1 |  |
|  | e17a110661 |  |
|  | fbe0a82c0d |  |
|  | 99e205fc21 |  |
|  | 49d3bca37b |  |
|  | a8ce7aeb43 |  |
|  | 02b44db922 |  |
|  | 33f904835a |  |
|  | 77d9357226 |  |
|  | bdbeeb94ec |  |
|  | 8df862e45d |  |
|  | d5275fc55f |  |
|  | f74d178b17 |  |
|  | cf9d56e5cf |  |
|  | 1fe5001369 |  |
|  | 9f7aaf90b5 |  |
|  | aa6ad288f1 |  |
|  | fa4d609e20 |  |
|  | 51fc3f693e |  |
|  | 9bae740527 |  |
|  | 1755326d8a |  |
|  | 1dc5a791cf |  |
|  | ba64c3b615 |  |
|  | f3eac2b3e9 |  |
|  | 6b7462a13f |  |
|  | 5bd3cb7260 |  |
|  | 04345338e1 |  |
|  | d31f5f4d89 |  |
|  | ce84dd9e20 |  |
|  | 33f7e5ce2a |  |
|  | 91085ef49e |  |
|  | ffa637050d |  |
|  | 0d0f32bc53 |  |
|  | 90a28fb475 |  |
|  | ae6cf586b0 |  |
|  | 6ae0c8db33 |  |
|  | d9a8728b11 |  |
|  | 67aa18e8dc |  |
|  | ed83c3a018 |  |
|  | aa9b00fb2f |  |
|  | 5e52d8563b |  |
|  | 5d7a6ad223 |  |
|  | 2093f83ea0 |  |
|  | 837f62266b |  |
|  | 07124d028d |  |
|  | 0e68760078 |  |
|  | b0a66ab83c |  |
|  | 74b74462f1 |  |
|  | 0f6e525be3 |  |
|  | ceecedc68b |  |
|  | e9e066055f |  |
|  | 351fdfede6 |  |
|  | 2f23eb27b3 |  |
|  | 11c23af465 |  |
|  | 026f4bdf3c |  |
|  | 198d52da3a |  |
|  | a17f64361c |  |
|  | 5909751936 |  |
|  | 0b885d62ef |  |
|  | 722b4f302d |  |
|  | 3b72bb780a |  |
|  | 1dee1e900b |  |
|  | 59dc87c618 |  |
|  | 2b6a77fcde |  |
|  | a8a50f5b57 |  |
|  | 5ce0b17e38 |  |
|  | 95c5b9bfb3 |  |
|  | acc7820574 |  |
|  | 14d8f342d5 |  |
|  | 4fb3cb208a |  |
|  | dac148341b |  |
|  | 842c2cfbf1 |  |
|  | d386f2f339 |  |
|  | e601f35d3b |  |
|  | 7b14c4a018 |  |
|  | 38e0e59f42 |  |
|  | 48c3a96886 |  |
|  | 48e57a6452 |  |
|  | 914e73cdd9 |  |
|  | 066b9f52b8 |  |
|  | 8363588237 |  |
|  | 855af069a4 |  |
|  | 19a1aac48c |  |
|  | edc244eec4 |  |
|  | 608bf7d741 |  |
|  | 107f256cd8 |  |
|  | 8f5d7302ac |  |
|  | 28c98e51ff |  |
|  | b5ce7f5874 |  |
|  | e8b68a4e4b |  |
|  | 1177d3f3a3 |  |
|  | 47f4f493f0 |  |
|  | 326c893d24 |  |
|  | 2d07c73777 |  |
|  | 3cfac9593c |  |
|  | 8039685051 |  |
|  | feee819973 |  |
|  | da4e52544e |  |
|  | d56e95ea8b |  |
|  | dc69a1cf43 |  |
|  | 47e63cc67a |  |
|  | 96ed33739a |  |
|  | 01243b98e1 |  |
|  | 473d3801b6 |  |
|  | 1d16f5ea0e |  |
|  | 937dea42e7 |  |
|  | c3843fd075 |  |
|  | bf46821180 |  |
|  | e48ba84e0b |  |
|  | e97d1cf001 |  |
|  | 645b1f0ea1 |  |
|  | c2ba994dbb |  |
|  | d2906fe666 |  |
|  | d773290cb1 |  |
|  | 7c232bd98b |  |
|  | d74054afda |  |
|  | bca3455b38 |  |
|  | 187dc6ad02 |  |
|  | e16521faab |  |
|  | 4e2a072a05 |  |
|  | 32ad2a3349 |  |
|  | 3889fcd9d7 |  |
|  | b064a41291 |  |
|  | 1adf27c82a |  |
|  | 3cf7d6d5b6 |  |
|  | cff1cb8685 |  |
|  | dd57715de2 |  |
|  | 91718b3f23 |  |
|  | be29ed7ad8 |  |
|  | 2b6b7f482a |  |
|  | 3675fb9bc6 |  |
|  | 7ba98a2874 |  |
|  | 4be582d7c8 |  |
|  | 01fbd95736 |  |
|  | 03edfc5850 |  |
|  | 391fb47791 |  |
|  | 3a86477162 |  |
|  | b7dec300b7 |  |
|  | 51b8a21f0c |  |
|  | 9279a2c4e4 |  |
|  | 9c59bc59c8 |  |
|  | dd2954f78d |  |
|  | 4efe1d4d3f |  |
@@ -1,22 +0,0 @@

```yaml
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres
    command: -c fsync=off

  testenv:
    image: python:3.5
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /src
    volumes:
      - ..:/src
```
@@ -1,22 +0,0 @@

```yaml
version: '3.1'

services:

  postgres:
    image: postgres:11
    environment:
      POSTGRES_PASSWORD: postgres
    command: -c fsync=off

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /src
    volumes:
      - ..:/src
```
@@ -1,22 +0,0 @@

```yaml
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres
    command: -c fsync=off

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /src
    volumes:
      - ..:/src
```
.buildkite/scripts/test_old_deps.sh (new executable file, 18 lines)

@@ -0,0 +1,18 @@

```bash
#!/bin/bash

# this script is run by buildkite in a plain `xenial` container; it installs the
# minimal requirements for tox and hands over to the py35-old tox environment.

set -ex

apt-get update
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev

# workaround for https://github.com/jaraco/zipp/issues/40
python3.5 -m pip install 'setuptools>=34.4.0'

python3.5 -m pip install tox

export LANG="C.UTF-8"

exec tox -e py35-old,combine
```
@@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update

```diff
 
 # this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
 Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
+
+Can get rooms/{roomId}/members at a given point
```
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 lines)

@@ -3,6 +3,10 @@

```diff
 <!-- Please read CONTRIBUTING.md before submitting your pull request -->
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
+  - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+  - Use markdown where necessary, mostly for `code blocks`.
+  - End with either a period (.) or an exclamation mark (!).
+  - Start with a capital letter.
 * [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
 * [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
```
CHANGES.md (287 lines)

@@ -1,6 +1,291 @@

Synapse 1.11.0 (2020-02-21)
===========================

Improved Documentation
----------------------

- Small grammatical fixes to the ACME v1 deprecation notice. ([\#6944](https://github.com/matrix-org/synapse/issues/6944))


Synapse 1.11.0rc1 (2020-02-19)
==============================

Features
--------

- Admin API to add or modify threepids of user accounts. ([\#6769](https://github.com/matrix-org/synapse/issues/6769))
- Limit the number of events that can be requested by the backfill federation API to 100. ([\#6864](https://github.com/matrix-org/synapse/issues/6864))
- Add ability to run some group APIs on workers. ([\#6866](https://github.com/matrix-org/synapse/issues/6866))
- Reject device display names over 100 characters in length to prevent abuse. ([\#6882](https://github.com/matrix-org/synapse/issues/6882))
- Add ability to route federation user device queries to workers. ([\#6873](https://github.com/matrix-org/synapse/issues/6873))
- The result of a user directory search can now be filtered via the spam checker. ([\#6888](https://github.com/matrix-org/synapse/issues/6888))
- Implement new `GET /_matrix/client/unstable/org.matrix.msc2432/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
- Stop sending `m.room.alias` events when adding / removing aliases. Check `alt_aliases` in the latest `m.room.canonical_alias` event when deleting an alias. ([\#6904](https://github.com/matrix-org/synapse/issues/6904))
- Change the default power levels of invites, tombstones and server ACLs for new rooms. ([\#6834](https://github.com/matrix-org/synapse/issues/6834))

Bugfixes
--------

- Fixed third party event rules function `on_create_room`'s return value being ignored. ([\#6781](https://github.com/matrix-org/synapse/issues/6781))
- Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
- Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
- Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))


Updates to the Docker image
---------------------------

- The deprecated "generate-config-on-the-fly" mode is no longer supported. ([\#6918](https://github.com/matrix-org/synapse/issues/6918))


Improved Documentation
----------------------

- Add details of PR merge strategy to contributing docs. ([\#6846](https://github.com/matrix-org/synapse/issues/6846))
- Spell out that the last event sent to a room won't be deleted by a purge. ([\#6891](https://github.com/matrix-org/synapse/issues/6891))
- Update Synapse's documentation to warn about the deprecation of ACME v1. ([\#6905](https://github.com/matrix-org/synapse/issues/6905), [\#6907](https://github.com/matrix-org/synapse/issues/6907), [\#6909](https://github.com/matrix-org/synapse/issues/6909))
- Add documentation for the spam checker. ([\#6906](https://github.com/matrix-org/synapse/issues/6906))
- Fix worker docs to point `/publicised_groups` API correctly. ([\#6938](https://github.com/matrix-org/synapse/issues/6938))
- Clean up and update docs on setting up federation. ([\#6940](https://github.com/matrix-org/synapse/issues/6940))
- Add a warning about indentation to generated configuration files. ([\#6920](https://github.com/matrix-org/synapse/issues/6920))
- Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. ([\#6921](https://github.com/matrix-org/synapse/issues/6921))
- Update pip install directions in readme to avoid error when using zsh. ([\#6855](https://github.com/matrix-org/synapse/issues/6855))


Deprecations and Removals
-------------------------

- Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. ([\#6877](https://github.com/matrix-org/synapse/issues/6877))


Internal Changes
----------------

- Add type hints to `SyncHandler`. ([\#6821](https://github.com/matrix-org/synapse/issues/6821))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6823](https://github.com/matrix-org/synapse/issues/6823), [\#6827](https://github.com/matrix-org/synapse/issues/6827), [\#6854](https://github.com/matrix-org/synapse/issues/6854), [\#6856](https://github.com/matrix-org/synapse/issues/6856), [\#6857](https://github.com/matrix-org/synapse/issues/6857), [\#6858](https://github.com/matrix-org/synapse/issues/6858))
- Fix stacktraces when using `ObservableDeferred` and async/await. ([\#6836](https://github.com/matrix-org/synapse/issues/6836))
- Port much of `synapse.handlers.federation` to async/await. ([\#6837](https://github.com/matrix-org/synapse/issues/6837), [\#6840](https://github.com/matrix-org/synapse/issues/6840))
- Populate `rooms.room_version` database column at startup, rather than in a background update. ([\#6847](https://github.com/matrix-org/synapse/issues/6847))
- Reduce amount we log at `INFO` level. ([\#6833](https://github.com/matrix-org/synapse/issues/6833), [\#6862](https://github.com/matrix-org/synapse/issues/6862))
- Remove unused `get_room_stats_state` method. ([\#6869](https://github.com/matrix-org/synapse/issues/6869))
- Add typing to `synapse.federation.sender` and port to async/await. ([\#6871](https://github.com/matrix-org/synapse/issues/6871))
- Refactor `_EventInternalMetadata` object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872))
- Add an additional entry to the SyTest blacklist for worker mode. ([\#6883](https://github.com/matrix-org/synapse/issues/6883))
- Fix the use of sed in the linting scripts when using BSD sed. ([\#6887](https://github.com/matrix-org/synapse/issues/6887))
- Add type hints to the spam checker module. ([\#6915](https://github.com/matrix-org/synapse/issues/6915))
- Convert the directory handler tests to use HomeserverTestCase. ([\#6919](https://github.com/matrix-org/synapse/issues/6919))
- Increase DB/CPU perf of `_is_server_still_joined` check. ([\#6936](https://github.com/matrix-org/synapse/issues/6936))
- Tiny optimisation for incoming HTTP request dispatch. ([\#6950](https://github.com/matrix-org/synapse/issues/6950))


Synapse 1.10.1 (2020-02-17)
===========================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. ([\#6924](https://github.com/matrix-org/synapse/issues/6924))


Synapse 1.10.0 (2020-02-12)
===========================

**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details.

Updates to the Docker image
---------------------------

- Update the docker images to Alpine Linux 3.11. ([\#6897](https://github.com/matrix-org/synapse/issues/6897))


Synapse 1.10.0rc5 (2020-02-11)
==============================

Bugfixes
--------

- Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. ([\#6884](https://github.com/matrix-org/synapse/issues/6884))

Synapse 1.10.0rc4 (2020-02-11)
==============================

This release candidate was built incorrectly and is superseded by 1.10.0rc5.

Synapse 1.10.0rc3 (2020-02-10)
==============================

Features
--------

- Filter out `m.room.aliases` from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878))


Internal Changes
----------------

- Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. ([\#6880](https://github.com/matrix-org/synapse/issues/6880))


Synapse 1.10.0rc2 (2020-02-06)
==============================

Bugfixes
--------

- Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844))
- Fix to the unknown remote device detection which was introduced in 1.10.0rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848))


Internal Changes
----------------

- Detect unexpected sender keys on remote encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850))


Synapse 1.10.0rc1 (2020-01-31)
==============================

Features
--------

- Add experimental support for updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794))


Bugfixes
--------

- Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS). ([\#6734](https://github.com/matrix-org/synapse/issues/6734))
- Minor fixes to `PUT /_synapse/admin/v2/users` admin api. ([\#6761](https://github.com/matrix-org/synapse/issues/6761))
- Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. ([\#6767](https://github.com/matrix-org/synapse/issues/6767))
- Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). ([\#6771](https://github.com/matrix-org/synapse/issues/6771))
- Fix outbound federation request metrics. ([\#6795](https://github.com/matrix-org/synapse/issues/6795))
- Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device. ([\#6796](https://github.com/matrix-org/synapse/issues/6796))
- Fix race in federation sender worker that delayed sending of device updates. ([\#6799](https://github.com/matrix-org/synapse/issues/6799), [\#6800](https://github.com/matrix-org/synapse/issues/6800))
- Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. ([\#6801](https://github.com/matrix-org/synapse/issues/6801))
- Fix waking up other workers when remote server is detected to have come back online. ([\#6811](https://github.com/matrix-org/synapse/issues/6811))


Improved Documentation
----------------------

- Clarify documentation related to `user_dir` and `federation_reader` workers. ([\#6775](https://github.com/matrix-org/synapse/issues/6775))


Internal Changes
----------------

- Record room versions in the `rooms` table. ([\#6729](https://github.com/matrix-org/synapse/issues/6729), [\#6788](https://github.com/matrix-org/synapse/issues/6788), [\#6810](https://github.com/matrix-org/synapse/issues/6810))
- Propagate cache invalidates from workers to other workers. ([\#6748](https://github.com/matrix-org/synapse/issues/6748))
- Remove some unnecessary admin handler abstraction methods. ([\#6751](https://github.com/matrix-org/synapse/issues/6751))
- Add some debugging for media storage providers. ([\#6757](https://github.com/matrix-org/synapse/issues/6757))
- Detect unknown remote devices and mark cache as stale. ([\#6776](https://github.com/matrix-org/synapse/issues/6776), [\#6819](https://github.com/matrix-org/synapse/issues/6819))
- Attempt to resync remote users' devices when detected as stale. ([\#6786](https://github.com/matrix-org/synapse/issues/6786))
- Delete current state from the database when server leaves a room. ([\#6792](https://github.com/matrix-org/synapse/issues/6792))
- When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale. ([\#6797](https://github.com/matrix-org/synapse/issues/6797))
- Add background update to clean out left rooms from current state. ([\#6802](https://github.com/matrix-org/synapse/issues/6802), [\#6816](https://github.com/matrix-org/synapse/issues/6816))
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6803](https://github.com/matrix-org/synapse/issues/6803), [\#6805](https://github.com/matrix-org/synapse/issues/6805), [\#6806](https://github.com/matrix-org/synapse/issues/6806), [\#6807](https://github.com/matrix-org/synapse/issues/6807), [\#6820](https://github.com/matrix-org/synapse/issues/6820))


Synapse 1.9.1 (2020-01-28)
==========================

Bugfixes
--------

- Fix bug where setting `mau_limit_reserved_threepids` config would cause Synapse to refuse to start. ([\#6793](https://github.com/matrix-org/synapse/issues/6793))


Synapse 1.9.0 (2020-01-23)
==========================

**WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)).

If your Synapse deployment uses workers, note that the reverse-proxy configurations for the `synapse.app.media_repository`, `synapse.app.federation_reader` and `synapse.app.event_creator` workers have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). Existing configurations will continue to work.


Improved Documentation
----------------------

- Fix endpoint documentation for the List Rooms admin API. ([\#6770](https://github.com/matrix-org/synapse/issues/6770))


Synapse 1.9.0rc1 (2020-01-22)
=============================

Features
--------

- Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5742](https://github.com/matrix-org/synapse/issues/5742))
- Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. ([\#6681](https://github.com/matrix-org/synapse/issues/6681), [\#6756](https://github.com/matrix-org/synapse/issues/6756))
- Add `org.matrix.e2e_cross_signing` to `unstable_features` in `/versions` as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712))
- Add a new admin API to list and filter rooms on the server. ([\#6720](https://github.com/matrix-org/synapse/issues/6720))


Bugfixes
--------

- Correctly proxy HTTP errors due to API calls to remote group servers. ([\#6654](https://github.com/matrix-org/synapse/issues/6654))
- Fix media repo admin APIs when using a media worker. ([\#6664](https://github.com/matrix-org/synapse/issues/6664))
- Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. ([\#6682](https://github.com/matrix-org/synapse/issues/6682))
- Fix a bug where we would assign a numeric user ID if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690))
- Fix `purge_room` admin API. ([\#6711](https://github.com/matrix-org/synapse/issues/6711))
- Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. ([\#6714](https://github.com/matrix-org/synapse/issues/6714))
- Fix the `synapse_port_db` script not correctly running background updates. Thanks @tadzik for reporting. ([\#6718](https://github.com/matrix-org/synapse/issues/6718))
- Fix changing password via user admin API. ([\#6730](https://github.com/matrix-org/synapse/issues/6730))
- Fix `/events/:event_id` deprecated API. ([\#6731](https://github.com/matrix-org/synapse/issues/6731))
- Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). ([\#6742](https://github.com/matrix-org/synapse/issues/6742))
- Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. ([\#6747](https://github.com/matrix-org/synapse/issues/6747))
- Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. ([\#6753](https://github.com/matrix-org/synapse/issues/6753))
- Fix the `docker-compose.yaml` overriding the entire `/etc` folder of the container. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656))


Improved Documentation
----------------------

- Fix a typo in the configuration example for purge jobs in the sample configuration file. ([\#6621](https://github.com/matrix-org/synapse/issues/6621))
- Add complete documentation of the message retention policies support. ([\#6624](https://github.com/matrix-org/synapse/issues/6624), [\#6665](https://github.com/matrix-org/synapse/issues/6665))
- Add some helpful tips about changelog entries to the GitHub pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663))
- Clarify the `account_validity` and `email` sections of the sample configuration. ([\#6685](https://github.com/matrix-org/synapse/issues/6685))
- Add more endpoints to the documentation for Synapse workers. ([\#6698](https://github.com/matrix-org/synapse/issues/6698))


Deprecations and Removals
-------------------------

- Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). ([\#6675](https://github.com/matrix-org/synapse/issues/6675))


Internal Changes
----------------

- Add `local_current_membership` table for tracking local user membership state in rooms. ([\#6655](https://github.com/matrix-org/synapse/issues/6655), [\#6728](https://github.com/matrix-org/synapse/issues/6728))
- Port `synapse.replication.tcp` to async/await. ([\#6666](https://github.com/matrix-org/synapse/issues/6666))
- Fixup `synapse.replication` to pass mypy checks. ([\#6667](https://github.com/matrix-org/synapse/issues/6667))
- Allow `additional_resources` to implement `IResource` directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686))
- Allow REST endpoint implementations to raise a `RedirectException`, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687))
- Updates and extensions to the module API. ([\#6688](https://github.com/matrix-org/synapse/issues/6688))
- Updates to the SAML mapping provider API. ([\#6689](https://github.com/matrix-org/synapse/issues/6689), [\#6723](https://github.com/matrix-org/synapse/issues/6723))
- Remove redundant `RegistrationError` class. ([\#6691](https://github.com/matrix-org/synapse/issues/6691))
- Don't block processing of incoming EDUs behind processing PDUs in the same transaction. ([\#6697](https://github.com/matrix-org/synapse/issues/6697))
- Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. ([\#6702](https://github.com/matrix-org/synapse/issues/6702))
- Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. ([\#6706](https://github.com/matrix-org/synapse/issues/6706))
- Add `StateMap` type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715))
- Add a `DeltaState` to track changes to be made to current state during event persistence. ([\#6716](https://github.com/matrix-org/synapse/issues/6716))
- Add more logging around message retention policies support. ([\#6717](https://github.com/matrix-org/synapse/issues/6717))
- When processing a SAML response, log the assertions for easier configuration. ([\#6724](https://github.com/matrix-org/synapse/issues/6724))
- Fixup `synapse.rest` to pass mypy. ([\#6732](https://github.com/matrix-org/synapse/issues/6732), [\#6764](https://github.com/matrix-org/synapse/issues/6764))
- Fixup `synapse.api` to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733))
- Allow streaming cache 'invalidate all' to workers. ([\#6749](https://github.com/matrix-org/synapse/issues/6749))
- Remove unused CI docker compose files. ([\#6754](https://github.com/matrix-org/synapse/issues/6754))


Synapse 1.8.0 (2020-01-09)
==========================

**WARNING**: As of this release Synapse will refuse to start if the `log_file` config option is specified. Support for the option was removed in v1.3.0.


Bugfixes
--------

@@ -16,7 +301,7 @@ Features

```diff
 - Add v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)). ([\#6349](https://github.com/matrix-org/synapse/issues/6349))
 - Add a develop script to generate full SQL schemas. ([\#6394](https://github.com/matrix-org/synapse/issues/6394))
-- Add custom SAML username mapping functinality through an external provider plugin. ([\#6411](https://github.com/matrix-org/synapse/issues/6411))
+- Add custom SAML username mapping functionality through an external provider plugin. ([\#6411](https://github.com/matrix-org/synapse/issues/6411))
 - Automatically delete empty groups/communities. ([\#6453](https://github.com/matrix-org/synapse/issues/6453))
 - Add option `limit_profile_requests_to_users_who_share_rooms` to prevent requirement of a local user sharing a room with another user to query their profile information. ([\#6523](https://github.com/matrix-org/synapse/issues/6523))
 - Add an `export_signing_key` script to extract the public part of signing keys when rotating them. ([\#6546](https://github.com/matrix-org/synapse/issues/6546))
```
CONTRIBUTING.md

@@ -101,8 +101,8 @@ in the format of `PRnumber.type`. The type can be one of the following:

```diff
 The content of the file is your changelog entry, which should be a short
 description of your change in the same style as the rest of our [changelog](
 https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
-contain Markdown formatting, and should end with a full stop ('.') for
-consistency.
+contain Markdown formatting, and should end with a full stop (.) or an
+exclamation mark (!) for consistency.
 
 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!
```

@@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s`

```diff
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
+## Merge Strategy
+
+We use the commit history of develop/master extensively to identify
+when regressions were introduced and what changes have been made.
+
+We aim to have a clean merge history, which means we normally squash-merge
+changes into develop. For small changes this means there is no need to rebase
+to clean up your PR before merging. Larger changes with an organised set of
+commits may be merged as-is, if the history is judged to be useful.
+
+This use of squash-merging will mean PRs built on each other will be hard to
+merge. We suggest avoiding these where possible, and if required, ensuring
+each PR has a tidy set of commits to ease merging.
+
 ## Conclusion
 
 That's it! Matrix is a very open and collaborative project as you might expect
```
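To make the changelog convention above concrete, an entry file looks roughly like the following sketch (the PR number `1234` and the entry text are hypothetical; Synapse keeps these files in the `changelog.d/` directory):

```bash
# A changelog file is named <PRnumber>.<type> and contains the entry text.
$ cat changelog.d/1234.bugfix
Fixed a bug that prevented receiving messages from other servers.
```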
INSTALL.md (30 lines)

@@ -133,6 +133,11 @@ sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \

````diff
 sudo yum groupinstall "Development Tools"
 ```
 
+Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
+uses SQLite 3.7. You may be able to work around this by installing a more
+recent SQLite version, but it is recommended that you instead use a Postgres
+database: see [docs/postgres.md](docs/postgres.md).
+
 #### macOS
 
 Installing prerequisites on macOS:
````

@@ -383,15 +388,17 @@ Once you have installed synapse as above, you will need to configure it.

```diff
 ## TLS certificates
 
-The default configuration exposes a single HTTP port: http://localhost:8008. It
-is suitable for local testing, but for any practical use, you will either need
-to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
+The default configuration exposes a single HTTP port on the local
+interface: `http://localhost:8008`. It is suitable for local testing,
+but for any practical use, you will need Synapse's APIs to be served
+over HTTPS.
 
-For information on using a reverse proxy, see
+The recommended way to do so is to set up a reverse proxy on port
+`8448`. You can find documentation on doing so in
 [docs/reverse_proxy.md](docs/reverse_proxy.md).
 
-To configure Synapse to expose an HTTPS port, you will need to edit
-`homeserver.yaml`, as follows:
+Alternatively, you can configure Synapse to expose an HTTPS port. To do
+so, you will need to edit `homeserver.yaml`, as follows:
 
 * First, under the `listeners` section, uncomment the configuration for the
   TLS-enabled listener. (Remove the hash sign (`#`) at the start of
```

@@ -409,10 +416,13 @@ To configure Synapse to expose an HTTPS port, you will need to edit

```diff
 point these settings at an existing certificate and key, or you can
 enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
 for having Synapse automatically provision and renew federation
-certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
-are using your own certificate, be sure to use a `.pem` file that includes
-the full certificate chain including any intermediate certificates (for
-instance, if using certbot, use `fullchain.pem` as your certificate, not
+certificates through ACME can be found at [ACME.md](docs/ACME.md).
+Note that, as pointed out in that document, this feature will not
+work with installs set up after November 2020.
+
+If you are using your own certificate, be sure to use a `.pem` file that
+includes the full certificate chain including any intermediate certificates
+(for instance, if using certbot, use `fullchain.pem` as your certificate, not
 `cert.pem`).
 
 For a more detailed guide to configuring your server for federation, see
```
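For orientation, the TLS-enabled listener referred to in the steps above looks roughly like the following once uncommented (a sketch based on the sample configuration; the port number and file paths are assumptions to adapt to your deployment):

```yaml
# homeserver.yaml (sketch): serve client and federation traffic over TLS.
listeners:
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]

# Certificate and key referenced by the TLS listener (placeholder paths).
tls_certificate_path: "/path/to/fullchain.pem"
tls_private_key_path: "/path/to/privkey.pem"
```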
README.rst

@@ -272,7 +272,7 @@ to install using pip and a virtualenv::

```diff
 
    virtualenv -p python3 env
    source env/bin/activate
-   python -m pip install --no-use-pep517 -e .[all]
+   python -m pip install --no-use-pep517 -e ".[all]"
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
```
UPGRADE.rst (18 lines)

@@ -75,6 +75,24 @@ for example:

```diff
       wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
       dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+
+Upgrading to v1.10.0
+====================
+
+Synapse will now log a warning on start up if used with a PostgreSQL database
+that has a non-recommended locale set.
+
+See `docs/postgres.md <docs/postgres.md>`_ for details.
+
+
 Upgrading to v1.8.0
 ===================
 
 Specifying a ``log_file`` config option will now cause Synapse to refuse to
 start, and should be replaced with the ``log_config`` option. Support for
 the ``log_file`` option was removed in v1.3.0 and has since had no effect.
 
 
 Upgrading to v1.7.0
 ===================
```
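To check the locale of an existing database before upgrading, a query along these lines works (a sketch; the user and database names are assumptions):

```bash
psql -h localhost -U synapse_user -c \
    "SELECT datname, datcollate, datctype FROM pg_database WHERE datname = 'synapse';"
# A collate/ctype of "C" is the recommended setting.
```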
contrib/docker/docker-compose.yml

@@ -18,7 +18,7 @@ services:

```diff
       - SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
     volumes:
       # You may either store all the files in a local folder
-      - ./matrix-config:/etc
+      - ./matrix-config/homeserver.yaml:/etc/homeserver.yaml
       - ./files:/data
       # .. or you may split this between different storage points
       # - ./files:/data
```

@@ -56,6 +56,9 @@ services:

```diff
     environment:
       - POSTGRES_USER=synapse
       - POSTGRES_PASSWORD=changeme
+      # ensure the database gets created correctly
+      # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
+      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
     volumes:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
```
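To verify that `POSTGRES_INITDB_ARGS` took effect on a freshly-created database, something like this can be run against the running container (a sketch; the container and user names are assumptions):

```bash
docker exec -it <postgres_container> psql -U synapse -l
# The synapse database should show UTF8 encoding with Collate/Ctype of "C".
```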
debian/changelog (vendored, 30 lines)

@@ -1,3 +1,33 @@

```diff
+matrix-synapse-py3 (1.11.0) stable; urgency=medium
+
+  * New synapse release 1.11.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 21 Feb 2020 08:54:34 +0000
+
+matrix-synapse-py3 (1.10.1) stable; urgency=medium
+
+  * New synapse release 1.10.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Feb 2020 16:27:28 +0000
+
+matrix-synapse-py3 (1.10.0) stable; urgency=medium
+
+  * New synapse release 1.10.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Feb 2020 12:18:54 +0000
+
+matrix-synapse-py3 (1.9.1) stable; urgency=medium
+
+  * New synapse release 1.9.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Jan 2020 13:09:23 +0000
+
+matrix-synapse-py3 (1.9.0) stable; urgency=medium
+
+  * New synapse release 1.9.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 23 Jan 2020 12:56:31 +0000
+
 matrix-synapse-py3 (1.8.0) stable; urgency=medium
 
   [ Richard van der Hoff ]
```
docker/Dockerfile

@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7

```diff
 ###
 ### Stage 0: builder
 ###
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder
 
 # install the OS build deps
```
docker/README.md

@@ -110,12 +110,12 @@ argument to `docker run`.

```diff
 ## Legacy dynamic configuration file support
 
-For backwards-compatibility only, the docker image supports creating a dynamic
-configuration file based on environment variables. This is now deprecated, but
-is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is
-not given).
+The docker image used to support creating a dynamic configuration file based
+on environment variables. This is no longer supported, and an error will be
+raised if you try to run synapse without a config file.
 
-To migrate from a dynamic configuration file to a static one, run the docker
+It is, however, possible to generate a static configuration file based on
+the environment variables that were previously used. To do this, run the docker
 container once with the environment variables set, and the `migrate_config`
 command line option. For example:
```

@@ -127,15 +127,20 @@ docker run -it --rm \

````diff
     matrixdotorg/synapse:latest migrate_config
 ```
 
-This will generate the same configuration file as the legacy mode used, but
-will store it in `/data/homeserver.yaml` instead of a temporary location. You
-can then use it as shown above at [Running synapse](#running-synapse).
+This will generate the same configuration file as the legacy mode used, and
+will store it in `/data/homeserver.yaml`. You can then use it as shown above at
+[Running synapse](#running-synapse).
+
+Note that the defaults used in this configuration file may be different to
+those when generating a new config file with `generate`: for example, TLS is
+enabled by default in this mode. You are encouraged to inspect the generated
+configuration file and edit it to ensure it meets your needs.
 
 ## Building the image
 
 If you need to build the image from a Synapse checkout, use the following `docker
 build` command from the repo's root:
 
 ```
 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
 ```
````
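The middle of the `migrate_config` example is elided by the hunks above; for reference, the full invocation has this general shape (a sketch: the mount path, server name and stats setting are placeholders):

```bash
docker run -it --rm \
    -v /my/synapse/data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    matrixdotorg/synapse:latest migrate_config
```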
docker/start.py

@@ -188,11 +188,6 @@ def main(args, environ):

```diff
     else:
         ownership = "{}:{}".format(desired_uid, desired_gid)
 
-    log(
-        "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
-        % (os.getuid(), os.getgid(), desired_uid, desired_gid)
-    )
-
     if ownership is None:
         log("Will not perform chmod/su-exec as UserID already matches request")
```

@@ -213,38 +208,30 @@ def main(args, environ):

```diff
     if mode is not None:
         error("Unknown execution mode '%s'" % (mode,))
 
-    if "SYNAPSE_SERVER_NAME" in environ:
-        # backwards-compatibility generate-a-config-on-the-fly mode
-        if "SYNAPSE_CONFIG_PATH" in environ:
-            error(
-                "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
-                "in `generate` or `migrate_config` mode. To start synapse using a "
-                "config file, unset the SYNAPSE_SERVER_NAME environment variable."
-            )
-
-        config_path = "/compiled/homeserver.yaml"
-        log(
-            "Generating config file '%s' on-the-fly from environment variables.\n"
-            "Note that this mode is deprecated. You can migrate to a static config\n"
-            "file by running with 'migrate_config'. See the README for more details."
-            % (config_path,)
-        )
-
-        generate_config_from_template("/compiled", config_path, environ, ownership)
-    else:
-        config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
-        config_path = environ.get(
-            "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
-        )
-        if not os.path.exists(config_path):
-            error(
-                "Config file '%s' does not exist. You should either create a new "
-                "config file by running with the `generate` argument (and then edit "
-                "the resulting file before restarting) or specify the path to an "
-                "existing config file with the SYNAPSE_CONFIG_PATH variable."
-                % (config_path,)
-            )
+    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
+    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
+
+    if not os.path.exists(config_path):
+        if "SYNAPSE_SERVER_NAME" in environ:
+            error(
+                """\
+Config file '%s' does not exist.
+
+The synapse docker image no longer supports generating a config file on-the-fly
+based on environment variables. You can migrate to a static config file by
+running with 'migrate_config'. See the README for more details.
+"""
+                % (config_path,)
+            )
+
+        error(
+            "Config file '%s' does not exist. You should either create a new "
+            "config file by running with the `generate` argument (and then edit "
+            "the resulting file before restarting) or specify the path to an "
+            "existing config file with the SYNAPSE_CONFIG_PATH variable."
+            % (config_path,)
+        )
 
     log("Starting synapse with config file " + config_path)
 
     args = ["python", "-m", synapse_worker, "--config-path", config_path]
```
docs/sample_config.yaml

@@ -1,4 +1,4 @@

```diff
-# The config is maintained as an up-to-date snapshot of the default
+# This file is maintained as an up-to-date snapshot of the default
 # homeserver.yaml configuration generated by Synapse.
 #
 # It is intended to act as a reference for the default configuration,
```

@@ -10,3 +10,5 @@

```diff
 # homeserver.yaml. Instead, if you are starting from scratch, please generate
 # a fresh config using Synapse by following the instructions in INSTALL.md.
 
+################################################################################
+
```
docs/ACME.md (55 lines)

@@ -1,12 +1,48 @@

```diff
 # ACME
 
-Synapse v1.0 will require valid TLS certificates for communication between
-servers (port `8448` by default) in addition to those that are client-facing
-(port `443`). If you do not already have a valid certificate for your domain,
-the easiest way to get one is with Synapse's new ACME support, which will use
-the ACME protocol to provision a certificate automatically. Synapse v0.99.0+
-will provision server-to-server certificates automatically for you for free
-through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
+From version 1.0 (June 2019) onwards, Synapse requires valid TLS
+certificates for communication between servers (by default on port
+`8448`) in addition to those that are client-facing (port `443`). To
+help homeserver admins fulfil this new requirement, Synapse v0.99.0
+introduced support for automatically provisioning certificates through
+[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol.
+
+## Deprecation of ACME v1
+
+In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
+Let's Encrypt announced that they were deprecating version 1 of the ACME
+protocol, with the plan to disable the use of it for new accounts in
+November 2019, and for existing accounts in June 2020.
+
+Synapse doesn't currently support version 2 of the ACME protocol, which
+means that:
+
+* for existing installs, Synapse's built-in ACME support will continue
+to work until June 2020.
+* for new installs, this feature will not work at all.
+
+Either way, it is recommended to move from Synapse's ACME support
+feature to an external automated tool such as [certbot](https://github.com/certbot/certbot)
+(or browse [this list](https://letsencrypt.org/fr/docs/client-options/)
+for an alternative ACME client).
+
+It's also recommended to use a reverse proxy for the server-facing
+communications (more documentation about this can be found
+[here](/docs/reverse_proxy.md)) as well as the client-facing ones and
+have it serve the certificates.
+
+In case you can't do that and need Synapse to serve them itself, make
+sure to set the `tls_certificate_path` configuration setting to the path
+of the certificate (make sure to use the certificate containing the full
+certification chain, e.g. `fullchain.pem` if using certbot) and
+`tls_private_key_path` to the path of the matching private key. Note
+that in this case you will need to restart Synapse after each
+certificate renewal so that Synapse stops using the old certificate.
+
+If you still want to use Synapse's built-in ACME support, the rest of
+this document explains how to set it up.
 
 ## Initial setup
 
 In the case that your `server_name` config variable is the same as
 the hostname that the client connects to, then the same certificate can be
```

@@ -32,11 +68,6 @@ If you already have certificates, you will need to back up or delete them

```diff
 (files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
 directory), Synapse's ACME implementation will not overwrite them.
 
-You may wish to use alternate methods such as Certbot to obtain a certificate
-from Let's Encrypt, depending on your server configuration. Of course, if you
-already have a valid certificate for your homeserver's domain, that can be
-placed in Synapse's config directory without the need for any ACME setup.
-
 ## ACME setup
 
 The main steps for enabling ACME support in short summary are:
```
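As a sketch of the `tls_certificate_path` and `tls_private_key_path` settings described above (the certbot paths are assumptions; substitute your own certificate locations):

```yaml
# homeserver.yaml: point Synapse at externally-managed certificates.
# These paths assume certbot's default layout for example.com.
tls_certificate_path: "/etc/letsencrypt/live/example.com/fullchain.pem"
tls_private_key_path: "/etc/letsencrypt/live/example.com/privkey.pem"
```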
docs/admin_api/media_admin_api.md

@@ -22,19 +22,81 @@ It returns a JSON body like the following:

````diff
 }
 ```
 
-# Quarantine media in a room
-
-This API 'quarantines' all the media in a room.
-
-The API is:
-
-```
-POST /_synapse/admin/v1/quarantine_media/<room_id>
-
-{}
-```
+# Quarantine media
+
+Quarantining media means that it is marked as inaccessible by users. It applies
+to any local media, and any locally-cached copies of remote media.
+
+The media file itself (and any thumbnails) is not deleted from the server.
+
+## Quarantining media by ID
+
+This API quarantines a single piece of local or remote media.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/quarantine/<server_name>/<media_id>
+
+{}
+```
+
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```
+{}
+```
+
+## Quarantining media in a room
+
+This API quarantines all local and remote media in a room.
+
+Request:
+
+```
+POST /_synapse/admin/v1/room/<room_id>/media/quarantine
+
+{}
+```
+
+Where `room_id` is in the form of `!roomid12345:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
+
+Note that there is a legacy endpoint, `POST
+/_synapse/admin/v1/quarantine_media/<room_id>`, that operates the same.
+However, it is deprecated and may be removed in a future release.
+
+## Quarantining all media of a user
+
+This API quarantines all *local* media that a *local* user has uploaded. That is to say, if
+you would like to quarantine media uploaded by a user on a remote homeserver, you should
+instead use one of the other APIs.
+
+Request:
+
+```
+POST /_synapse/admin/v1/user/<user_id>/media/quarantine
+
+{}
+```
+
+Where `user_id` is in the form of `@bob:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
````
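Like other Synapse admin APIs, these endpoints must be called by a server admin; a request might look like the following (a sketch: the homeserver URL, access token and media ID are placeholders):

```bash
curl -X POST \
    -H "Authorization: Bearer <admin_access_token>" \
    -d '{}' \
    "https://example.org/_synapse/admin/v1/media/quarantine/example.org/abcdefg12345"
```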
docs/admin_api/purge_history_api.rst

@@ -8,6 +8,9 @@ Depending on the amount of history being purged a call to the API may take

```diff
 several minutes or longer. During this period users will not be able to
 paginate further back in the room from the point being purged from.
 
+Note that Synapse requires at least one message in each room, so it will never
+delete the last message in a room.
+
 The API is:
 
 ``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
```
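A purge request against that endpoint might look like this (a sketch: the room ID, event ID and access token are placeholders):

```bash
curl -X POST \
    -H "Authorization: Bearer <admin_access_token>" \
    "https://example.org/_synapse/admin/v1/purge_history/<room_id>/<event_id>"
```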
173
docs/admin_api/rooms.md
Normal file
173
docs/admin_api/rooms.md
Normal file
@@ -0,0 +1,173 @@

# List Room API

The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.

## Parameters

The following query parameters are available:

* `from` - Offset in the returned list. Defaults to `0`.
* `limit` - Maximum number of rooms to return. Defaults to `100`.
* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
  - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default.
  - `size` - Rooms are ordered by the number of members. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
          this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any
                  part of the room name. Defaults to no filtering.

The following fields are possible in the JSON response body:

* `rooms` - An array of objects, each containing information about a room.
  - Room objects contain the following fields:
    - `room_id` - The ID of the room.
    - `name` - The name of the room.
    - `canonical_alias` - The canonical (main) alias address of the room.
    - `joined_members` - How many users are currently in the room.
* `offset` - The current pagination offset in rooms. This parameter should be
             used instead of `next_batch` for room offset as `next_batch` is
             not intended to be parsed.
* `total_rooms` - The total number of rooms this query can return. Using this
                  and `offset`, you have enough information to know the current
                  progression through the list.
* `next_batch` - If this field is present, we know that there are potentially
                 more rooms on the server that did not all fit into this response.
                 We can use `next_batch` to get the "next page" of results. To do
                 so, simply repeat your request, setting the `from` parameter to
                 the value of `next_batch`.
* `prev_batch` - If this field is present, it is possible to paginate backwards.
                 Use `prev_batch` for the `from` value in the next request to
                 get the "previous page" of results.

## Usage

A standard request with no filtering:

```
GET /_synapse/admin/v1/rooms

{}
```

Response:

```
{
  "rooms": [
    {
      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
      "name": "Matrix HQ",
      "canonical_alias": "#matrix:matrix.org",
      "joined_members": 8326
    },
    ... (8 hidden items) ...
    {
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
      "joined_members": 314
    }
  ],
  "offset": 0,
  "total_rooms": 10
}
```

Filtering by room name:

```
GET /_synapse/admin/v1/rooms?search_term=TWIM

{}
```

Response:

```
{
  "rooms": [
    {
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
      "joined_members": 314
    }
  ],
  "offset": 0,
  "total_rooms": 1
}
```

Paginating through a list of rooms:

```
GET /_synapse/admin/v1/rooms?order_by=size

{}
```

Response:

```
{
  "rooms": [
    {
      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
      "name": "Matrix HQ",
      "canonical_alias": "#matrix:matrix.org",
      "joined_members": 8326
    },
    ... (98 hidden items) ...
    {
      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
      "name": "This Week In Matrix (TWIM)",
      "canonical_alias": "#twim:matrix.org",
      "joined_members": 314
    }
  ],
  "offset": 0,
  "total_rooms": 150,
  "next_batch": 100
}
```

The presence of the `next_batch` parameter tells us that there are more rooms
than returned in this request, and we need to make another request to get them.
To get the next batch of room results, we repeat our request, setting the `from`
parameter to the value of `next_batch`.

```
GET /_synapse/admin/v1/rooms?order_by=size&from=100

{}
```

Response:

```
{
  "rooms": [
    {
      "room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
      "name": "Music Theory",
      "canonical_alias": "#musictheory:matrix.org",
      "joined_members": 127
    },
    ... (48 hidden items) ...
    {
      "room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
      "name": "weechat-matrix",
      "canonical_alias": "#weechat-matrix:termina.org.uk",
      "joined_members": 137
    }
  ],
  "offset": 100,
  "prev_batch": 0,
  "total_rooms": 150
}
```

Once the `next_batch` parameter is no longer present, we know we've reached the
end of the list.
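
Putting the above together, here is a hedged Python sketch that walks the
full room list (assuming the `requests` library and a hypothetical
`ADMIN_TOKEN`), following `next_batch` until the server stops returning one:

```python
# Sketch: iterate over every room on the server, one page at a time.
# ADMIN_TOKEN is a placeholder for a real server admin access token.
import requests

ADMIN_TOKEN = "syt_..."
HOMESERVER = "https://synapse.example.com"

def iter_rooms():
    from_offset = 0
    while True:
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v1/rooms",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
            params={"order_by": "size", "from": from_offset},
        )
        resp.raise_for_status()
        body = resp.json()
        yield from body["rooms"]
        if "next_batch" not in body:
            break  # no more pages
        from_offset = body["next_batch"]

for room in iter_rooms():
    print(room["room_id"], room["joined_members"])
```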

@@ -1,3 +1,45 @@
Create or modify Account
========================

This API allows an administrator to create or modify a user account with a
specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
``@user:server.com``.

This API is::

    PUT /_synapse/admin/v2/users/<user_id>

with a body of:

.. code:: json

    {
        "password": "user_password",
        "displayname": "User",
        "threepids": [
            {
                "medium": "email",
                "address": "<user_mail_1>"
            },
            {
                "medium": "email",
                "address": "<user_mail_2>"
            }
        ],
        "avatar_url": "<avatar_url>",
        "admin": false,
        "deactivated": false
    }

including an ``access_token`` of a server admin.

The parameter ``displayname`` is optional and defaults to ``user_id``.
The parameter ``threepids`` is optional.
The parameter ``avatar_url`` is optional.
The parameter ``admin`` is optional and defaults to ``false``.
The parameter ``deactivated`` is optional and defaults to ``false``.
If the user already exists then optional parameters default to the current value.
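
For illustration, a minimal Python sketch of this call (the ``requests``
library and the admin token value are assumptions, not part of the API):

.. code:: python

    # Sketch: create or update @user:server.com via the v2 admin API.
    import requests

    ADMIN_TOKEN = "syt_..."  # placeholder for a server admin's access token

    resp = requests.put(
        "https://synapse.example.com/_synapse/admin/v2/users/@user:server.com",
        headers={"Authorization": "Bearer " + ADMIN_TOKEN},
        json={"password": "user_password", "admin": False},
    )
    resp.raise_for_status()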

List Accounts
=============

@@ -50,7 +92,8 @@ This API returns information about a specific user account.

The API is::

    GET /_synapse/admin/v1/whois/<user_id>
    GET /_synapse/admin/v1/whois/<user_id> (deprecated)
    GET /_synapse/admin/v2/users/<user_id>

including an ``access_token`` of a server admin.
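
For example, a hedged Python sketch querying the v2 endpoint (``requests``
and the token are assumptions):

.. code:: python

    # Sketch: look up a user account via the v2 admin API.
    import requests

    ADMIN_TOKEN = "syt_..."  # placeholder

    resp = requests.get(
        "https://synapse.example.com/_synapse/admin/v2/users/@user:server.com",
        headers={"Authorization": "Bearer " + ADMIN_TOKEN},
    )
    resp.raise_for_status()
    print(resp.json())  # account details, e.g. displayname and admin status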

94
docs/delegate.md
Normal file
@@ -0,0 +1,94 @@

# Delegation

By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
to `example.com` (so that your user names look like `@user:example.com`),
other servers will try to connect to yours at `https://example.com:8448/`.

Delegation is a Matrix feature allowing a homeserver admin to retain a
`server_name` of `example.com` so that user IDs, room aliases, etc continue
to look like `*:example.com`, whilst having federation traffic routed
to a different server and/or port (e.g. `synapse.example.com:443`).

## .well-known delegation

To use this method, you need to be able to configure the server at your
`server_name` domain to serve the `/.well-known/matrix/server`
URL. Having an active server (with a valid TLS certificate) serving your
`server_name` domain is out of the scope of this documentation.

The URL `https://<server_name>/.well-known/matrix/server` should
return a JSON structure containing the key `m.server` like so:

```json
{
    "m.server": "<synapse.server.name>[:<yourport>]"
}
```

In our example, this would mean that the URL `https://example.com/.well-known/matrix/server`
should return:

```json
{
    "m.server": "synapse.example.com:443"
}
```

Note that specifying a port is optional. If no port is specified, it defaults
to 8448.

With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).
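
As a quick sanity check, the record can be fetched and inspected from
Python; a minimal sketch, assuming the `requests` library:

```python
# Sketch: fetch and inspect a server's .well-known delegation record.
import requests

server_name = "example.com"
resp = requests.get(f"https://{server_name}/.well-known/matrix/server", timeout=10)
resp.raise_for_status()
delegated = resp.json()["m.server"]
print(f"Federation traffic for {server_name} is delegated to {delegated}")
# expected output for our example: synapse.example.com:443
```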

## SRV DNS record delegation

It is also possible to do delegation using an SRV DNS record. However, that is
considered an advanced topic since it's a bit complex to set up, and `.well-known`
delegation is already sufficient in most cases.

However, if you really need it, you can find some documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).

## Delegation FAQ

### When do I need delegation?

If your homeserver's APIs are accessible on the default federation port (8448)
and at the domain your `server_name` points to, you do not need any delegation.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host, giving it a `server_name`
of `example.com`, and once a reverse proxy has been set up to proxy all requests
sent to port `8448` and serve TLS certificates for `example.com`, you
wouldn't need any delegation set up.

**However**, if your homeserver's APIs aren't accessible on port 8448 and on the
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.

### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.

In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.

### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.

178
docs/federate.md
@@ -1,163 +1,41 @@

Setting up Federation
Setting up federation
=====================

Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.

The ``server_name`` configured in the Synapse configuration file (often
``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
identified (eg: ``@user:example.com``, ``#room:example.com``). By
default, it is also the domain that other servers will use to
try to reach your server (via port 8448). This is easy to set
up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.
The `server_name` configured in the Synapse configuration file (often
`homeserver.yaml`) defines how resources (users, rooms, etc.) will be
identified (eg: `@user:example.com`, `#room:example.com`). By default,
it is also the domain that other servers will use to try to reach your
server (via port 8448). This is easy to set up and will work provided
you set the `server_name` to match your machine's public DNS hostname.

For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions
on how to correctly set one up.

In some cases you might not want to run Synapse on the machine that has
the `server_name` as its public DNS hostname, or you might want federation
traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.

Once federation has been configured, you should be able to join a room over
federation. A good place to start is ``#synapse:matrix.org`` - a room for
federation. A good place to start is `#synapse:matrix.org` - a room for
Synapse admins.

## Delegation

For a more flexible configuration, you can have ``server_name``
resources (eg: ``@user:example.com``) served by a different host and
port (eg: ``synapse.example.com:443``). There are two ways to do this:

- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
- adding a DNS ``SRV`` record in the DNS zone of domain
  ``example.com``.

Without configuring delegation, Matrix federation will
expect to find your server via ``example.com:8448``. The following methods
allow you to retain a `server_name` of `example.com` so that your user IDs, room
aliases, etc continue to look like `*:example.com`, whilst having your
federation traffic routed to a different server.

### .well-known delegation

To use this method, you need to be able to configure the server at your
``server_name`` domain to serve the ``/.well-known/matrix/server``
URL. Having an active server (with a valid TLS certificate) serving your
``server_name`` domain is out of the scope of this documentation.

The URL ``https://<server_name>/.well-known/matrix/server`` should
return a JSON structure containing the key ``m.server`` like so:

    {
        "m.server": "<synapse.server.name>[:<yourport>]"
    }

In our example, this would mean that the URL ``https://example.com/.well-known/matrix/server``
should return:

    {
        "m.server": "synapse.example.com:443"
    }

Note that specifying a port is optional. If a port is not specified, an SRV lookup
is performed, as described below. If the target of the
delegation does not have an SRV record, then the port defaults to 8448.

Most installations will not need to configure .well-known. However, it can be
useful in cases where the admin is hosting on behalf of someone else and
therefore cannot gain access to the necessary certificate. With .well-known,
federation servers will check for a valid TLS certificate for the delegated
hostname (in our example: ``synapse.example.com``).

### DNS SRV delegation

To use this delegation method, you need to have write access to your
``server_name``'s domain zone DNS records (in our example it would be the
``example.com`` DNS zone).

This method requires the target server to provide a
valid TLS certificate for the original ``server_name``.

You need to add an SRV record in your ``server_name``'s DNS zone with
this format:

    _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>

In our example, we would need to add this SRV record in the
``example.com`` DNS zone:

    _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.

Once done and set up, you can check the DNS record with ``dig -t srv
_matrix._tcp.<server_name>``. In our example, we would expect this:

    $ dig -t srv _matrix._tcp.example.com
    _matrix._tcp.example.com. 3600 IN SRV 10 0 443 synapse.example.com.

Note that the target of an SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the Synapse instance.

### Delegation FAQ

#### When do I need an SRV record or .well-known URI?

If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.

**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.

#### I have created a .well-known URI. Do I also need an SRV record?

No. You can use either `.well-known` delegation or an SRV record for delegation. You
do not need to use both to delegate to the same location.

#### Can I manage my own certificates rather than having Synapse renew certificates itself?

Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation endpoints.

#### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

Practically speaking, this is no longer necessary.

If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.

#### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.

## Troubleshooting

You can use the [federation tester](
<https://matrix.org/federationtester>) to check if your homeserver is
configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace ``DOMAIN`` with your
``server_name``. Hitting the API directly provides extra detail.
You can use the [federation tester](https://matrix.org/federationtester)
to check if your homeserver is configured correctly. Alternatively try the
[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
Note that you'll have to modify this URL to replace `DOMAIN` with your
`server_name`. Hitting the API directly provides extra detail.
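
For scripted checks, the tester's JSON API can be queried directly; a
minimal sketch, assuming the `requests` library (the exact shape of the
report is not guaranteed here):

```python
# Sketch: query the federation tester's JSON API for a domain.
import requests

server_name = "example.com"  # replace with your server_name
resp = requests.get(
    "https://matrix.org/federationtester/api/report",
    params={"server_name": server_name},
    timeout=30,
)
resp.raise_for_status()
# The report is a nested JSON document; print it for manual inspection.
print(resp.json())
```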

The typical failure mode for federation is that when the server tries to join
a room, it is rejected with "401: Unauthorized". Generally this means that other
@@ -169,8 +47,8 @@ you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.

## Running a Demo Federation of Synapses
## Running a demo federation of Synapses

If you want to get up and running quickly with a trio of homeservers in a
private federation, there is a script in the ``demo`` directory. This is mainly
private federation, there is a script in the `demo` directory. This is mainly
useful just for development purposes. See [demo/README](<../demo/README>).

195
docs/message_retention_policies.md
Normal file
@@ -0,0 +1,195 @@

# Message retention policies

Synapse admins can enable support for message retention policies on
their homeserver. Message retention policies exist at a room level,
follow the semantics described in
[MSC1763](https://github.com/matrix-org/matrix-doc/blob/matthew/msc1763/proposals/1763-configurable-retention-periods.md),
and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.

**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental.**

A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after
it was sent to the room. If a room doesn't have a message retention
policy, and there's no default one for a given server, then no message
sent in that room is ever purged on that server.

MSC1763 also specifies semantics for a `min_lifetime` parameter which
defines the amount of time after which an event _can_ get purged (after
it was sent to the room), but Synapse doesn't currently support it
beyond registering it.

Both `max_lifetime` and `min_lifetime` are optional parameters.

Note that message retention policies don't apply to state events.

Once an event reaches its expiry date (defined as the time it was sent
plus the value for `max_lifetime` in the room), two things happen:

* Synapse stops serving the event to clients via any endpoint.
* The message gets picked up by the next purge job (see the "Purge jobs"
  section) and is removed from Synapse's database.

Since purge jobs don't run continuously, this means that an event might
stay in a server's database for longer than the value for `max_lifetime`
in the room would allow, though hidden from clients.

Similarly, if a server (with support for message retention policies
enabled) receives from another server an event that should have been
purged according to its room's policy, then the receiving server will
process and store that event until it's picked up by the next purge job,
though it will always hide it from clients.

Synapse requires at least one message in each room, so it will never
delete the last message in a room. It will, however, hide it from
clients.

## Server configuration

Support for this feature can be enabled and configured in the
`retention` section of the Synapse configuration file (see the
[sample file](https://github.com/matrix-org/synapse/blob/v1.7.3/docs/sample_config.yaml#L332-L393)).

To enable support for message retention policies, set the setting
`enabled` in this section to `true`.

### Default policy

A default message retention policy is a policy defined in Synapse's
configuration that is used by Synapse for every room that doesn't have a
message retention policy configured in its state. This allows server
admins to ensure that messages are never kept indefinitely in a server's
database.

A default policy can be defined as such, in the `retention` section of
the configuration file:

```yaml
default_policy:
  min_lifetime: 1d
  max_lifetime: 1y
```

Here, `min_lifetime` and `max_lifetime` have the same meaning and level
of support as previously described. They can be expressed either as a
duration (using the units `s` (seconds), `m` (minutes), `h` (hours),
`d` (days), `w` (weeks) and `y` (years)) or as a number of milliseconds.

### Purge jobs

Purge jobs are the jobs that Synapse runs in the background to purge
expired events from the database. They are only run if support for
message retention policies is enabled in the server's configuration. If
no configuration for purge jobs is configured by the server admin,
Synapse will use a default configuration, which is described in the
[sample configuration file](https://github.com/matrix-org/synapse/blob/master/docs/sample_config.yaml#L332-L393).

Some server admins might want finer control over when events are removed,
depending on an event's room's policy. This can be done by setting the
`purge_jobs` sub-section in the `retention` section of the configuration
file. An example of such configuration could be:

```yaml
purge_jobs:
  - longest_max_lifetime: 3d
    interval: 12h
  - shortest_max_lifetime: 3d
    longest_max_lifetime: 1w
    interval: 1d
  - shortest_max_lifetime: 1w
    interval: 2d
```

In this example, we define three jobs:

* one that runs twice a day (every 12 hours) and purges events in rooms
  whose policy's `max_lifetime` is lower than or equal to 3 days.
* one that runs once a day and purges events in rooms whose policy's
  `max_lifetime` is between 3 days and a week.
* one that runs once every 2 days and purges events in rooms whose
  policy's `max_lifetime` is greater than a week.

Note that this example is tailored to show different configurations and
features slightly more jobs than are probably necessary (in practice, a
server admin would probably consider it better to replace the two last
jobs with one that runs once a day and handles rooms whose
policy's `max_lifetime` is greater than 3 days).

Keep in mind, when configuring these jobs, that a purge job can become
quite heavy on the server if it targets many rooms, so prefer
having jobs with a low interval that each target a limited set of rooms. Also
make sure to include a job with no minimum and one with no maximum to
make sure your configuration handles every policy.
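
To sanity-check a configuration against the policies you expect to see, it
can help to model the matching rule; the following is an illustrative
sketch (not Synapse's actual implementation) that assumes lifetimes have
already been converted to milliseconds:

```python
# Illustrative model of the job-matching rule described above: a job
# handles a policy if the policy's max_lifetime falls inside the job's
# (shortest_max_lifetime, longest_max_lifetime] window. This is NOT
# Synapse's own code, just a way to check coverage of a config.
DAY_MS = 24 * 60 * 60 * 1000

purge_jobs = [
    {"longest_max_lifetime": 3 * DAY_MS, "interval": "12h"},
    {"shortest_max_lifetime": 3 * DAY_MS, "longest_max_lifetime": 7 * DAY_MS, "interval": "1d"},
    {"shortest_max_lifetime": 7 * DAY_MS, "interval": "2d"},
]

def jobs_for_policy(max_lifetime_ms):
    """Return every job whose window covers the given max_lifetime."""
    matches = []
    for job in purge_jobs:
        low = job.get("shortest_max_lifetime", 0)
        high = job.get("longest_max_lifetime", float("inf"))
        if low < max_lifetime_ms <= high:
            matches.append(job)
    return matches

# A policy of max_lifetime = 1 day should be handled by exactly one job:
assert [j["interval"] for j in jobs_for_policy(1 * DAY_MS)] == ["12h"]
```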

As previously mentioned in this documentation, while a purge job that
runs e.g. every day means that an expired event might stay in the
database for up to a day after its expiry, Synapse hides expired events
from clients as soon as they expire, so the event is not visible to
local users between its expiry date and the moment it gets purged from
the server's database.

### Lifetime limits

**Note: this feature is mainly useful within a closed federation or on
servers that don't federate, because there currently is no way to
enforce these limits in an open federation.**

Server admins can restrict the values their local users are allowed to
use for both `min_lifetime` and `max_lifetime`. These limits can be
defined as such in the `retention` section of the configuration file:

```yaml
allowed_lifetime_min: 1d
allowed_lifetime_max: 1y
```

Here, `allowed_lifetime_min` is the lowest value a local user can set
for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max`
is the highest value. Both parameters are optional (e.g. setting
`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a
minimum and no maximum).

Like other settings in this section, these parameters can be expressed
either as a duration or as a number of milliseconds.

## Room configuration

To configure a room's message retention policy, a room's admin or
moderator needs to send a state event in that room with the type
`m.room.retention` and the following content:

```json
{
    "max_lifetime": ...
}
```

In this event's content, the `max_lifetime` parameter has the same
meaning as previously described, and needs to be expressed in
milliseconds. The event's content can also include a `min_lifetime`
parameter, which has the same meaning and limited support as previously
described.
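
For example, here is a hedged Python sketch that sets a one-week
`max_lifetime` on a room via the client-server API (the `requests`
library, the access token and the room ID are assumptions; the sender
needs permission to send `m.room.retention` state events in that room):

```python
# Sketch: set a room's retention policy by sending an m.room.retention
# state event through the client-server API. Placeholders throughout.
import requests
from urllib.parse import quote

ACCESS_TOKEN = "syt_..."                 # a room admin's access token
HOMESERVER = "https://synapse.example.com"
ROOM_ID = "!roomid12345:example.org"

ONE_WEEK_MS = 7 * 24 * 60 * 60 * 1000    # max_lifetime must be in milliseconds

resp = requests.put(
    f"{HOMESERVER}/_matrix/client/r0/rooms/{quote(ROOM_ID, safe='')}"
    "/state/m.room.retention",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"max_lifetime": ONE_WEEK_MS},
)
resp.raise_for_status()
print(resp.json())  # contains the event_id of the new state event
```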

Note that, of all the servers in a room, only the ones with support for
message retention policies will actually remove expired events. This
support is currently not enabled by default in Synapse.

## Note on reclaiming disk space

While purge jobs actually delete data from the database, the disk space
used by the database might not decrease immediately on the database's
host. However, even though the database engine won't free up the disk
space, it will start writing new data into the space the purged data
previously occupied.

If you want to reclaim the freed disk space anyway and return it to the
operating system, you need to run `VACUUM FULL;` (or
`VACUUM;` for SQLite databases) on Synapse's database (see the related
[PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-vacuum.html)).

@@ -32,7 +32,7 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate

    su - postgres
    # Or, if your system uses sudo to get administrative rights
    sudo -u postgres bash

Then, create a user ``synapse_user`` with:

    createuser --pwprompt synapse_user

@@ -63,6 +63,24 @@ You may need to enable password authentication so `synapse_user` can
connect to the database. See
<https://www.postgresql.org/docs/11/auth-pg-hba-conf.html>.

### Fixing incorrect `COLLATE` or `CTYPE`

Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.

The safest way to fix the issue is to take a dump and recreate the database with
the correct `COLLATE` and `CTYPE` parameters (as per
[docs/postgres.md](docs/postgres.md)). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database;
however, extreme care must be taken to avoid database corruption.

Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.

## Tuning Postgres

The default settings should be fine for most deployments. For larger

@@ -18,9 +18,10 @@ When setting up a reverse proxy, remember that Matrix clients and other
Matrix servers do not necessarily need to connect to your server via the
same server name or port. Indeed, clients will use port 443 by default,
whereas servers default to port 8448. Where these are different, we
refer to the 'client port' and the 'federation port'. See [Setting
up federation](federate.md) for more details of the algorithm used for
federation connections.
refer to the 'client port' and the 'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.

Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at

@@ -1,4 +1,4 @@
# The config is maintained as an up-to-date snapshot of the default
# This file is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
@@ -10,6 +10,16 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.

################################################################################

# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html

## Server ##

# The domain name of the server, with optional explicit port.

@@ -387,17 +397,17 @@ retention:
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
# of outdated messages on a more frequent basis than for the rest of the rooms
# (e.g. every 12h), but not want that purge to be performed by a job that's
# iterating over every room it knows, which could be heavy on the server.
#
#purge_jobs:
#  - shortest_max_lifetime: 1d
#    longest_max_lifetime: 3d
#    interval: 5m:
#    interval: 12h
#  - shortest_max_lifetime: 3d
#    longest_max_lifetime: 1y
#    interval: 24h
#    interval: 1d


## TLS ##

@@ -466,6 +476,11 @@ retention:
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
# ACME v2. This means that this feature currently won't work with installs set
# up after November 2019. For more info, and alternative solutions, see
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
#
# Note that provisioning a certificate in this way requires port 80 to be
# routed to Synapse so that it can complete the http-01 ACME challenge.
# By default, if you enable ACME support, Synapse will attempt to listen on

@@ -874,23 +889,6 @@ media_store_path: "DATADIR/media_store"
# Optional account validity configuration. This allows for accounts to be denied
# any request after a given period.
#
# ``enabled`` defines whether the account validity feature is enabled. Defaults
# to False.
#
# ``period`` allows setting the period after which an account is valid
# after its registration. When renewing the account, its validity period
# will be extended by this amount of time. This parameter is required when using
# the account validity feature.
#
# ``renew_at`` is the amount of time before an account's expiry date at which
# Synapse will send an email to the account's email address with a renewal link.
# This needs the ``email`` and ``public_baseurl`` configuration sections to be
# filled.
#
# ``renew_email_subject`` is the subject of the email sent out with the renewal
# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter
# from the ``email`` section.
#
# Once this feature is enabled, Synapse will look for registered users without an
# expiration date at startup and will add one to every account it found using the
# current settings at that time.

@@ -901,21 +899,55 @@ media_store_path: "DATADIR/media_store"
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
#
#account_validity:
#  enabled: true
#  period: 6w
#  renew_at: 1w
#  renew_email_subject: "Renew your %(app)s account"
#  # Directory in which Synapse will try to find the HTML files to serve to the
#  # user when trying to renew an account. Optional, defaults to
#  # synapse/res/templates.
#  template_dir: "res/templates"
#  # HTML to be displayed to the user after they successfully renewed their
#  # account. Optional.
#  account_renewed_html_path: "account_renewed.html"
#  # HTML to be displayed when the user tries to renew an account with an invalid
#  # renewal token. Optional.
#  invalid_token_html_path: "invalid_token.html"
account_validity:
  # The account validity feature is disabled by default. Uncomment the
  # following line to enable it.
  #
  #enabled: true

  # The period after which an account is valid after its registration. When
  # renewing the account, its validity period will be extended by this amount
  # of time. This parameter is required when using the account validity
  # feature.
  #
  #period: 6w

  # The amount of time before an account's expiry date at which Synapse will
  # send an email to the account's email address with a renewal link. By
  # default, no such emails are sent.
  #
  # If you enable this setting, you will also need to fill out the 'email' and
  # 'public_baseurl' configuration sections.
  #
  #renew_at: 1w

  # The subject of the email sent out with the renewal link. '%(app)s' can be
  # used as a placeholder for the 'app_name' parameter from the 'email'
  # section.
  #
  # Note that the placeholder must be written '%(app)s', including the
  # trailing 's'.
  #
  # If this is not set, a default value is used.
  #
  #renew_email_subject: "Renew your %(app)s account"

  # Directory in which Synapse will try to find templates for the HTML files to
  # serve to the user when trying to renew an account. If not set, default
  # templates from within the Synapse package will be used.
  #
  #template_dir: "res/templates"

  # File within 'template_dir' giving the HTML to be displayed to the user after
  # they successfully renewed their account. If not set, default text is used.
  #
  #account_renewed_html_path: "account_renewed.html"

  # File within 'template_dir' giving the HTML to be displayed when the user
  # tries to renew an account with an invalid renewal token. If not set,
  # default text is used.
  #
  #invalid_token_html_path: "invalid_token.html"

# Time that a user's session remains valid for, after they log in.
#

@@ -1353,107 +1385,110 @@ password_config:
#pepper: "EVEN_MORE_SECRET"


# Configuration for sending emails from Synapse.
#
email:
  # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
  #
  #smtp_host: mail.server

# Enable sending emails for password resets, notification events or
# account expiry notices
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
#
#email:
#   enable_notifs: false
#   smtp_host: "localhost"
#   smtp_port: 25 # SSL: 465, STARTTLS: 587
#   smtp_user: "exampleusername"
#   smtp_pass: "examplepassword"
#   require_transport_security: false
#
#   # notif_from defines the "From" address to use when sending emails.
#   # It must be set if email sending is enabled.
#   #
#   # The placeholder '%(app)s' will be replaced by the application name,
#   # which is normally 'app_name' (below), but may be overridden by the
#   # Matrix client application.
#   #
#   # Note that the placeholder must be written '%(app)s', including the
#   # trailing 's'.
#   #
#   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
#
#   # app_name defines the default value for '%(app)s' in notif_from. It
#   # defaults to 'Matrix'.
#   #
#   #app_name: my_branded_matrix_server
#
#   # Enable email notifications by default
#   #
#   notif_for_new_users: true
#
#   # Defining a custom URL for Riot is only needed if email notifications
#   # should contain links to a self-hosted installation of Riot; when set
#   # the "app_name" setting is ignored
#   #
#   riot_base_url: "http://localhost/riot"
#
#   # Configure the time that a validation email or text message code
#   # will expire after sending
#   #
#   # This is currently used for password resets
#   #
#   #validation_token_lifetime: 1h
#
#   # Template directory. All template files should be stored within this
#   # directory. If not set, default templates from within the Synapse
#   # package will be used
#   #
#   # For the list of default templates, please see
#   # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#   #
#   #template_dir: res/templates
#
#   # Templates for email notifications
#   #
#   notif_template_html: notif_mail.html
#   notif_template_text: notif_mail.txt
#
#   # Templates for account expiry notices
#   #
#   expiry_template_html: notice_expiry.html
#   expiry_template_text: notice_expiry.txt
#
#   # Templates for password reset emails sent by the homeserver
#   #
#   #password_reset_template_html: password_reset.html
#   #password_reset_template_text: password_reset.txt
#
#   # Templates for registration emails sent by the homeserver
#   #
#   #registration_template_html: registration.html
#   #registration_template_text: registration.txt
#
#   # Templates for validation emails sent by the homeserver when adding an email to
#   # your user account
#   #
#   #add_threepid_template_html: add_threepid.html
#   #add_threepid_template_text: add_threepid.txt
#
#   # Templates for password reset success and failure pages that a user
#   # will see after attempting to reset their password
#   #
#   #password_reset_template_success_html: password_reset_success.html
#   #password_reset_template_failure_html: password_reset_failure.html
#
#   # Templates for registration success and failure pages that a user
#   # will see after attempting to register using an email or phone
#   #
#   #registration_template_success_html: registration_success.html
#   #registration_template_failure_html: registration_failure.html
#
#   # Templates for success and failure pages that a user will see after attempting
#   # to add an email or phone to their account
#   #
#   #add_threepid_success_html: add_threepid_success.html
#   #add_threepid_failure_html: add_threepid_failure.html
  # The port on the mail server for outgoing SMTP. Defaults to 25.
  #
  #smtp_port: 587

  # Username/password for authentication to the SMTP server. By default, no
  # authentication is attempted.
  #
  # smtp_user: "exampleusername"
  # smtp_pass: "examplepassword"

  # Uncomment the following to require TLS transport security for SMTP.
  # By default, Synapse will connect over plain text, and will then switch to
  # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
  # Synapse will refuse to connect unless the server supports STARTTLS.
  #
  #require_transport_security: true

  # Enable sending emails for messages that the user has missed
  #
  #enable_notifs: false

  # notif_from defines the "From" address to use when sending emails.
  # It must be set if email sending is enabled.
  #
  # The placeholder '%(app)s' will be replaced by the application name,
  # which is normally 'app_name' (below), but may be overridden by the
  # Matrix client application.
  #
  # Note that the placeholder must be written '%(app)s', including the
  # trailing 's'.
  #
  #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"

  # app_name defines the default value for '%(app)s' in notif_from. It
  # defaults to 'Matrix'.
  #
  #app_name: my_branded_matrix_server

  # Uncomment the following to disable automatic subscription to email
  # notifications for new users. Enabled by default.
  #
  #notif_for_new_users: false

  # Custom URL for client links within the email notifications. By default
  # links will be based on "https://matrix.to".
  #
  # (This setting used to be called riot_base_url; the old name is still
  # supported for backwards-compatibility but is now deprecated.)
  #
  #client_base_url: "http://localhost/riot"

  # Configure the time that a validation email will expire after sending.
  # Defaults to 1h.
  #
  #validation_token_lifetime: 15m

  # Directory in which Synapse will try to find the template files below.
  # If not set, default templates from within the Synapse package will be used.
  #
  # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
  # If you *do* uncomment it, you will need to make sure that all the templates
  # below are in the directory.
  #
  # Synapse will look for the following templates in this directory:
  #
  # * The contents of email notifications of missed events: 'notif_mail.html' and
  #   'notif_mail.txt'.
  #
  # * The contents of account expiry notice emails: 'notice_expiry.html' and
  #   'notice_expiry.txt'.
  #
  # * The contents of password reset emails sent by the homeserver:
  #   'password_reset.html' and 'password_reset.txt'
  #
  # * HTML pages for success and failure that a user will see when they follow
  #   the link in the password reset email: 'password_reset_success.html' and
  #   'password_reset_failure.html'
  #
  # * The contents of address verification emails sent during registration:
  #   'registration.html' and 'registration.txt'
  #
  # * HTML pages for success and failure that a user will see when they follow
  #   the link in an address verification email sent during registration:
  #   'registration_success.html' and 'registration_failure.html'
  #
  # * The contents of address verification emails sent when an address is added
  #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
  #
  # * HTML pages for success and failure that a user will see when they follow
  #   the link in an address verification email sent when an address is added
  #   to a Matrix account: 'add_threepid_success.html' and
  #   'add_threepid_failure.html'
  #
  # You can see the default templates at:
  # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
  #
  #template_dir: "res/templates"


#password_providers:

88
docs/spam_checker.md
Normal file
@@ -0,0 +1,88 @@

# Handling spam in Synapse

Synapse has support for customising spam checking behavior. It can plug into a
variety of events and affect how they are presented to users on your homeserver.

The spam checking behavior is implemented as a Python class, which must be
importable by the running Synapse.

## Python spam checker class

The Python class is instantiated with two objects:

* Any configuration (see below).
* An instance of `synapse.spam_checker_api.SpamCheckerApi`.

It then implements methods which return a boolean to alter behavior in Synapse.

There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:

* `user_may_invite`
* `user_may_create_room`
* `user_may_create_room_alias`
* `user_may_publish_room`

The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.

The `SpamCheckerApi` class provides a way for the custom spam checker class to
call back into the homeserver internals. It currently implements the following
methods:

* `get_state_events_in_room`

### Example

```python
class ExampleSpamChecker:
    def __init__(self, config, api):
        self.config = config
        self.api = api

    def check_event_for_spam(self, foo):
        return False  # allow all events

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        return True  # allow all invites

    def user_may_create_room(self, userid):
        return True  # allow all room creations

    def user_may_create_room_alias(self, userid, room_alias):
        return True  # allow all room aliases

    def user_may_publish_room(self, userid, room_id):
        return True  # allow publishing of all rooms

    def check_username_for_spam(self, user_profile):
        return False  # allow all usernames
```

## Configuration

Modify the `spam_checker` section of your `homeserver.yaml` in the following
manner:

`module` should point to the fully qualified Python class that implements your
custom logic, e.g. `my_module.ExampleSpamChecker`.

`config` is a dictionary that gets passed to the spam checker class.

### Example

This section might look like:

```yaml
spam_checker:
  module: my_module.ExampleSpamChecker
  config:
    # Enable or disable a specific option in ExampleSpamChecker.
    my_custom_option: true
```

## Examples

The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged
example using the Synapse spam checking API, including a bot for dynamic
configuration.

@@ -209,7 +209,7 @@ Where `<token>` may be either:
* a numeric stream_id to stream updates since (exclusive)
* `NOW` to stream all subsequent updates.

The `<stream_name>` is the name of a replication stream to subscribe
to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
of streams). It can also be `ALL` to subscribe to all known streams,
in which case the `<token>` must be set to `NOW`.
@@ -234,6 +234,10 @@ in which case the `<token>` must be set to `NOW`.

Used exclusively in tests

### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.

See `synapse/replication/tcp/commands.py` for a detailed description and
the format of each command.

@@ -250,6 +254,11 @@ and the key to invalidate. For example:

    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

Alternatively, an entire cache can be invalidated by sending down a `null`
instead of the key. For example:

    > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252]

However, there are times when a number of caches need to be invalidated
at the same time with the same key. To reduce traffic we batch those
invalidations into a single poke by defining a special cache name that

@@ -168,20 +168,42 @@ endpoints matching the following regular expressions:

    ^/_matrix/federation/v1/make_join/
    ^/_matrix/federation/v1/make_leave/
    ^/_matrix/federation/v1/send_join/
    ^/_matrix/federation/v2/send_join/
    ^/_matrix/federation/v1/send_leave/
    ^/_matrix/federation/v2/send_leave/
    ^/_matrix/federation/v1/invite/
    ^/_matrix/federation/v2/invite/
    ^/_matrix/federation/v1/query_auth/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/user/devices/
    ^/_matrix/federation/v1/send/
    ^/_matrix/federation/v1/get_groups_publicised$
    ^/_matrix/key/v2/query

Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/federation/v1/groups/

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.

Note that `federation` must be added to the listener resources in the worker config:

```yaml
worker_app: synapse.app.federation_reader
...
worker_listeners:
 - type: http
   port: <port>
   resources:
     - names:
       - federation
```

### `synapse.app.federation_sender`

Handles sending federation traffic to other servers. Doesn't handle any

@@ -199,7 +221,9 @@ Handles the media repository. It can handle all endpoints starting with:

... and the following regular expressions matching media-specific administration APIs:

    ^/_synapse/admin/v1/purge_media_cache$
    ^/_synapse/admin/v1/room/.*/media$
    ^/_synapse/admin/v1/room/.*/media.*$
    ^/_synapse/admin/v1/user/.*/media.*$
    ^/_synapse/admin/v1/media/.*$
    ^/_synapse/admin/v1/quarantine_media/.*$

You should also set `enable_media_repo: False` in the shared configuration
@@ -236,10 +260,14 @@ following regular expressions:

    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
    ^/_matrix/client/versions$
    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/

Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:
@@ -260,6 +288,10 @@ the following regular expressions:

    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$

When using this worker you must also set `update_user_directory: False` in the
shared configuration file to stop the main synapse running background
jobs related to updating the user directory.

### `synapse.app.frontend_proxy`

Proxies some frequently-requested client endpoints to add caching and remove
@@ -288,6 +320,7 @@ file. For example:

Handles some event creation. It can handle REST endpoints matching:

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/

mypy.ini
@@ -7,6 +7,9 @@ show_error_codes = True
show_traceback = True
mypy_path = stubs

[mypy-pymacaroons.*]
ignore_missing_imports = True

[mypy-zope]
ignore_missing_imports = True

@@ -63,3 +66,12 @@ ignore_missing_imports = True

[mypy-sentry_sdk]
ignore_missing_imports = True

[mypy-PIL.*]
ignore_missing_imports = True

[mypy-lxml]
ignore_missing_imports = True

[mypy-jwt.*]
ignore_missing_imports = True

@@ -3,7 +3,8 @@
# Exits with 0 if there are no problems, or another code otherwise.

# Fix non-lowercase true/false values
sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
rm docs/sample_config.yaml.bak

# Check if anything changed
git diff --exit-code docs/sample_config.yaml
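
(The switch from `sed -i` to `sed -i.bak` plus an explicit `rm` is presumably for portability: BSD/macOS `sed` requires a suffix argument to `-i`, whereas GNU `sed` accepts both forms.)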

@@ -22,10 +22,12 @@ import yaml

from twisted.internet import defer, reactor

import synapse
from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("update_database")

@@ -38,6 +40,8 @@ class MockHomeserver(HomeServer):
            config.server_name, reactor=reactor, config=config, **kwargs
        )

        self.version_string = "Synapse/" + get_version_string(synapse)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
@@ -81,15 +85,17 @@ if __name__ == "__main__":
    hs.setup()
    store = hs.get_datastore()

    @defer.inlineCallbacks
    def run_background_updates():
        yield store.db.updates.run_background_updates(sleep=False)
    async def run_background_updates():
        await store.db.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()

    # Apply all background updates on the database.
    reactor.callWhenRunning(
        lambda: run_as_background_process("background_updates", run_background_updates)
    )
    def run():
        # Apply all background updates on the database.
        defer.ensureDeferred(
            run_as_background_process("background_updates", run_background_updates)
        )

    reactor.callWhenRunning(run)

    reactor.run()
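
The restructured script above follows the standard pattern for driving a coroutine from the Twisted reactor. A minimal standalone sketch of that pattern (illustrative, not part of the diff):

```python
from twisted.internet import defer, reactor

async def background_work():
    # ... perform the async work here ...
    reactor.stop()  # stop the reactor when done so the script exits

def run():
    # ensureDeferred schedules the coroutine and returns a Deferred
    defer.ensureDeferred(background_work())

reactor.callWhenRunning(run)
reactor.run()
```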

@@ -52,7 +52,7 @@ if __name__ == "__main__":
    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
        password_config = config.get("password_config", {})
        password_config = config.get("password_config", None) or {}
        password_pepper = password_config.get("pepper", password_pepper)
    password = args.password
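
A note on why the replacement uses `.get(..., None) or {}` rather than `.get(..., {})` (a small illustrative check, not from the diff):

```python
import yaml

# A config file containing just the bare key parses to None, not {}.
config = yaml.safe_load("password_config:")
assert config["password_config"] is None

# dict.get only falls back to its default when the key is *absent*:
assert config.get("password_config", {}) is None
# ... so "or {}" is needed to cope with an explicitly-empty section:
assert (config.get("password_config", None) or {}) == {}
```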

@@ -27,13 +27,16 @@ from six import string_types

import yaml

from twisted.enterprise import adbapi
from twisted.internet import defer, reactor

import synapse
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.logging.context import PreserveLoggingContext
from synapse.storage._base import LoggingTransaction
from synapse.logging.context import (
    LoggingContext,
    make_deferred_yieldable,
    run_in_background,
)
from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.data_stores.main.deviceinbox import (
    DeviceInboxBackgroundUpdateStore,
@@ -61,6 +64,7 @@ from synapse.storage.database import Database, make_conn
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.util import Clock
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse_port_db")

@@ -125,6 +129,13 @@ APPEND_ONLY_TABLES = [
]


# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error = None
# The exec_info for the error, if any. If error is defined but not exec_info the script
# will show only the error message without the stacktrace, if exec_info is defined but
# not the error then the script will show nothing outside of what's printed in the run
# function. If both are defined, the script will print both the error and the stacktrace.
end_error_exec_info = None


@@ -177,6 +188,7 @@ class MockHomeserver:
        self.clock = Clock(reactor)
        self.config = config
        self.hostname = config.server_name
        self.version_string = "Synapse/" + get_version_string(synapse)

    def get_clock(self):
        return self.clock
@@ -189,11 +201,10 @@ class Porter(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @defer.inlineCallbacks
    def setup_table(self, table):
    async def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            row = yield self.postgres_store.db.simple_select_one(
            row = await self.postgres_store.db.simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcols=("forward_rowid", "backward_rowid"),
@@ -207,10 +218,10 @@ class Porter(object):
                forward_chunk,
                already_ported,
                total_to_port,
            ) = yield self._setup_sent_transactions()
            ) = await self._setup_sent_transactions()
            backward_chunk = 0
        else:
            yield self.postgres_store.db.simple_insert(
            await self.postgres_store.db.simple_insert(
                table="port_from_sqlite3",
                values={
                    "table_name": table,
@@ -227,7 +238,7 @@ class Porter(object):
            backward_chunk = row["backward_rowid"]

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                already_ported, total_to_port = await self._get_total_count_to_port(
                    table, forward_chunk, backward_chunk
                )
        else:
@@ -238,9 +249,9 @@ class Porter(object):
                )
                txn.execute("TRUNCATE %s CASCADE" % (table,))

            yield self.postgres_store.execute(delete_all)
            await self.postgres_store.execute(delete_all)

            yield self.postgres_store.db.simple_insert(
            await self.postgres_store.db.simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
            )
@@ -248,16 +259,13 @@ class Porter(object):
            forward_chunk = 1
            backward_chunk = 0

            already_ported, total_to_port = yield self._get_total_count_to_port(
            already_ported, total_to_port = await self._get_total_count_to_port(
                table, forward_chunk, backward_chunk
            )

        defer.returnValue(
            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
        )
        return table, already_ported, total_to_port, forward_chunk, backward_chunk

    @defer.inlineCallbacks
    def handle_table(
    async def handle_table(
        self, table, postgres_size, table_size, forward_chunk, backward_chunk
    ):
        logger.info(
@@ -275,7 +283,7 @@ class Porter(object):
        self.progress.add_table(table, postgres_size, table_size)

        if table == "event_search":
            yield self.handle_search_table(
            await self.handle_search_table(
                postgres_size, table_size, forward_chunk, backward_chunk
            )
            return
@@ -294,7 +302,7 @@ class Porter(object):
        if table == "user_directory_stream_pos":
            # We need to make sure there is a single row, `(X, null), as that is
            # what synapse expects to be there.
            yield self.postgres_store.db.simple_insert(
            await self.postgres_store.db.simple_insert(
                table=table, values={"stream_id": None}
            )
            self.progress.update(table, table_size)  # Mark table as done
@@ -335,7 +343,7 @@ class Porter(object):

            return headers, forward_rows, backward_rows

        headers, frows, brows = yield self.sqlite_store.db.runInteraction(
        headers, frows, brows = await self.sqlite_store.db.runInteraction(
            "select", r
        )

@@ -361,7 +369,7 @@ class Porter(object):
                },
            )

            yield self.postgres_store.execute(insert)
            await self.postgres_store.execute(insert)

            postgres_size += len(rows)

@@ -369,8 +377,7 @@ class Porter(object):
        else:
            return

    @defer.inlineCallbacks
    def handle_search_table(
    async def handle_search_table(
        self, postgres_size, table_size, forward_chunk, backward_chunk
    ):
        select = (
@@ -390,7 +397,7 @@ class Porter(object):

            return headers, rows

        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db.runInteraction("select", r)

        if rows:
            forward_chunk = rows[-1][0] + 1
@@ -438,7 +445,7 @@ class Porter(object):
                },
            )

            yield self.postgres_store.execute(insert)
            await self.postgres_store.execute(insert)

            postgres_size += len(rows)

@@ -447,20 +454,15 @@ class Porter(object):
        else:
            return

    def setup_db(self, db_config: DatabaseConnectionConfig, engine):
        db_conn = make_conn(db_config, engine)
        prepare_database(db_conn, engine, config=None)

        db_conn.commit()

        return db_conn

    @defer.inlineCallbacks
    def build_db_store(self, db_config: DatabaseConnectionConfig):
    def build_db_store(
        self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
    ):
        """Builds and returns a database store using the provided configuration.

        Args:
            config: The database configuration
            db_config: The database configuration
            allow_outdated_version: True to suppress errors about the database server
                version being too old to run a complete synapse

        Returns:
            The built Store object.
@@ -468,24 +470,23 @@ class Porter(object):
        self.progress.set_state("Preparing %s" % db_config.config["name"])

        engine = create_engine(db_config.config)
        conn = self.setup_db(db_config, engine)

        hs = MockHomeserver(self.hs_config)

        store = Store(Database(hs, db_config, engine), conn, hs)

        yield store.db.runInteraction(
            "%s_engine.check_database" % db_config.config["name"],
            engine.check_database,
        )
        with make_conn(db_config, engine) as db_conn:
            engine.check_database(
                db_conn, allow_outdated_version=allow_outdated_version
            )
            prepare_database(db_conn, engine, config=self.hs_config)
            store = Store(Database(hs, db_config, engine), db_conn, hs)
            db_conn.commit()

        return store

    @defer.inlineCallbacks
    def run_background_updates_on_postgres(self):
    async def run_background_updates_on_postgres(self):
        # Manually apply all background updates on the PostgreSQL database.
        postgres_ready = (
            yield self.postgres_store.db.updates.has_completed_background_updates()
            await self.postgres_store.db.updates.has_completed_background_updates()
        )

        if not postgres_ready:
@@ -494,35 +495,44 @@ class Porter(object):
            self.progress.set_state("Running background updates on PostgreSQL")

            while not postgres_ready:
                yield self.postgres_store.db.updates.do_next_background_update(100)
                postgres_ready = yield (
                await self.postgres_store.db.updates.do_next_background_update(100)
                postgres_ready = await (
                    self.postgres_store.db.updates.has_completed_background_updates()
                )

    @defer.inlineCallbacks
    def run(self):
    async def run(self):
        """Ports the SQLite database to a PostgreSQL database.

        When a fatal error is met, its message is assigned to the global "end_error"
        variable. When this error comes with a stacktrace, its exec_info is assigned to
        the global "end_error_exec_info" variable.
        """
        global end_error

        try:
            self.sqlite_store = yield self.build_db_store(
                DatabaseConnectionConfig("master-sqlite", self.sqlite_config)
            # we allow people to port away from outdated versions of sqlite.
            self.sqlite_store = self.build_db_store(
                DatabaseConnectionConfig("master-sqlite", self.sqlite_config),
                allow_outdated_version=True,
            )

            # Check if all background updates are done, abort if not.
            updates_complete = (
                yield self.sqlite_store.db.updates.has_completed_background_updates()
                await self.sqlite_store.db.updates.has_completed_background_updates()
            )
            if not updates_complete:
                sys.stderr.write(
                end_error = (
                    "Pending background updates exist in the SQLite3 database."
                    " Please start Synapse again and wait until every update has finished"
                    " before running this script.\n"
                )
                defer.returnValue(None)
                return

            self.postgres_store = yield self.build_db_store(
            self.postgres_store = self.build_db_store(
                self.hs_config.get_single_database()
            )

            yield self.run_background_updates_on_postgres()
            await self.run_background_updates_on_postgres()

            self.progress.set_state("Creating port tables")

@@ -550,22 +560,22 @@ class Porter(object):
            )

            try:
                yield self.postgres_store.db.runInteraction("alter_table", alter_table)
                await self.postgres_store.db.runInteraction("alter_table", alter_table)
            except Exception:
                # On Error Resume Next
                pass

            yield self.postgres_store.db.runInteraction(
            await self.postgres_store.db.runInteraction(
                "create_port_table", create_port_table
            )

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = yield self.sqlite_store.db.simple_select_onecol(
            sqlite_tables = await self.sqlite_store.db.simple_select_onecol(
                table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
            )

            postgres_tables = yield self.postgres_store.db.simple_select_onecol(
            postgres_tables = await self.postgres_store.db.simple_select_onecol(
                table="information_schema.tables",
                keyvalues={},
                retcol="distinct table_name",
@@ -576,28 +586,34 @@ class Porter(object):

            # Step 3. Figure out what still needs copying
            self.progress.set_state("Checking on port progress")
            setup_res = yield defer.gatherResults(
                [
                    self.setup_table(table)
                    for table in tables
                    if table not in ["schema_version", "applied_schema_deltas"]
                    and not table.startswith("sqlite_")
                ],
                consumeErrors=True,
            setup_res = await make_deferred_yieldable(
                defer.gatherResults(
                    [
                        run_in_background(self.setup_table, table)
                        for table in tables
                        if table not in ["schema_version", "applied_schema_deltas"]
                        and not table.startswith("sqlite_")
                    ],
                    consumeErrors=True,
                )
            )

            # Step 4. Do the copying.
            self.progress.set_state("Copying to postgres")
            yield defer.gatherResults(
                [self.handle_table(*res) for res in setup_res], consumeErrors=True
            await make_deferred_yieldable(
                defer.gatherResults(
                    [run_in_background(self.handle_table, *res) for res in setup_res],
                    consumeErrors=True,
                )
            )

            # Step 5. Do final post-processing
            yield self._setup_state_group_id_seq()
            await self._setup_state_group_id_seq()

            self.progress.done()
        except Exception:
        except Exception as e:
            global end_error_exec_info
            end_error = e
            end_error_exec_info = sys.exc_info()
            logger.exception("")
        finally:
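
Steps 3 and 4 above replace bare `defer.gatherResults` calls with the `run_in_background`/`make_deferred_yieldable` pairing. A condensed sketch of the pattern, using the Synapse helpers imported earlier (illustrative, not code from the diff):

```python
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background

async def process_all(items, handle_one):
    # run_in_background gives each coroutine its own logcontext;
    # make_deferred_yieldable restores the caller's logcontext once the
    # combined Deferred from gatherResults resolves.
    return await make_deferred_yieldable(
        defer.gatherResults(
            [run_in_background(handle_one, item) for item in items],
            consumeErrors=True,
        )
    )
```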

@@ -637,8 +653,7 @@ class Porter(object):

        return outrows

    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
    async def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time() * 1000) - 86400000

@@ -659,7 +674,7 @@ class Porter(object):

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db.runInteraction("select", r)

        rows = self._convert_rows("sent_transactions", headers, rows)

@@ -672,7 +687,7 @@ class Porter(object):
                txn, "sent_transactions", headers[1:], rows
            )

            yield self.postgres_store.execute(insert)
            await self.postgres_store.execute(insert)
        else:
            max_inserted_rowid = 0

@@ -689,10 +704,10 @@ class Porter(object):
            else:
                return 1

        next_chunk = yield self.sqlite_store.execute(get_start_id)
        next_chunk = await self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        yield self.postgres_store.db.simple_insert(
        await self.postgres_store.db.simple_insert(
            table="port_from_sqlite3",
            values={
                "table_name": "sent_transactions",
@@ -708,46 +723,49 @@ class Porter(object):
            (size,) = txn.fetchone()
            return int(size)

        remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
        remaining_count = await self.sqlite_store.execute(get_sent_table_size)

        total_count = remaining_count + inserted_rows

        defer.returnValue((next_chunk, inserted_rows, total_count))
        return next_chunk, inserted_rows, total_count

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
        frows = yield self.sqlite_store.execute_sql(
    async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
        frows = await self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
        )

        brows = yield self.sqlite_store.execute_sql(
        brows = await self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
        )

        defer.returnValue(frows[0][0] + brows[0][0])
        return frows[0][0] + brows[0][0]

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
        rows = yield self.postgres_store.execute_sql(
    async def _get_already_ported_count(self, table):
        rows = await self.postgres_store.execute_sql(
            "SELECT count(*) FROM %s" % (table,)
        )

        defer.returnValue(rows[0][0])
        return rows[0][0]

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
    async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
        remaining, done = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._get_remaining_count_to_port,
                        table,
                        forward_chunk,
                        backward_chunk,
                    ),
                    run_in_background(self._get_already_ported_count, table),
                ],
            )
        )

        remaining = int(remaining) if remaining else 0
        done = int(done) if done else 0

        defer.returnValue((done, remaining + done))
        return done, remaining + done

    def _setup_state_group_id_seq(self):
        def r(txn):
@@ -1013,7 +1031,12 @@ if __name__ == "__main__":
        hs_config=config,
    )

    reactor.callWhenRunning(porter.run)
    @defer.inlineCallbacks
    def run():
        with LoggingContext("synapse_port_db_run"):
            yield defer.ensureDeferred(porter.run())

    reactor.callWhenRunning(run)

    reactor.run()

@@ -1022,7 +1045,11 @@ if __name__ == "__main__":
    else:
        start()

    if end_error_exec_info:
        exc_type, exc_value, exc_traceback = end_error_exec_info
        traceback.print_exception(exc_type, exc_value, exc_traceback)
    if end_error:
        if end_error_exec_info:
            exc_type, exc_value, exc_traceback = end_error_exec_info
            traceback.print_exception(exc_type, exc_value, exc_traceback)

        sys.stderr.write(end_error)

        sys.exit(5)

@@ -36,7 +36,7 @@ try:
except ImportError:
    pass

__version__ = "1.8.0"
__version__ = "1.11.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -14,7 +14,7 @@
# limitations under the License.

import logging
from typing import Dict, Tuple
from typing import Optional

from six import itervalues

@@ -34,8 +34,10 @@ from synapse.api.errors import (
    MissingClientTokenError,
    ResourceLimitError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.config.server import is_threepid_reserved
from synapse.types import UserID
from synapse.events import EventBase
from synapse.types import StateMap, UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure
@@ -78,32 +80,48 @@ class Auth(object):
        self._account_validity = hs.config.account_validity

    @defer.inlineCallbacks
    def check_from_context(self, room_version, event, context, do_sig_check=True):
    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids()
        auth_events_ids = yield self.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        event_auth.check(
            room_version, event, auth_events=auth_events, do_sig_check=do_sig_check
            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
        )

    @defer.inlineCallbacks
    def check_joined_room(self, room_id, user_id, current_state=None):
        """Check if the user is currently joined in the room
    def check_user_in_room(
        self,
        room_id: str,
        user_id: str,
        current_state: Optional[StateMap[EventBase]] = None,
        allow_departed_users: bool = False,
    ):
        """Check if the user is in the room, or was at some point.
        Args:
            room_id(str): The room to check.
            user_id(str): The user to check.
            current_state(dict): Optional map of the current state of the room.
            room_id: The room to check.

            user_id: The user to check.

            current_state: Optional map of the current state of the room.
                If provided then that map is used to check whether they are a
                member of the room. Otherwise the current membership is
                loaded from the database.

            allow_departed_users: if True, accept users that were previously
                members but have now departed.

        Raises:
            AuthError if the user is not in the room.
            AuthError if the user is/was not in the room.
        Returns:
            A deferred membership event for the user if the user is in
            the room.
            Deferred[Optional[EventBase]]:
                Membership event for the user if the user was in the
                room. This will be the join event if they are currently joined to
                the room. This will be the leave event if they have left the room.
        """
        if current_state:
            member = current_state.get((EventTypes.Member, user_id), None)
@@ -111,37 +129,19 @@ class Auth(object):
            member = yield self.state.get_current_state(
                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
            )

        self._check_joined_room(member, user_id, room_id)
        return member

    @defer.inlineCallbacks
    def check_user_was_in_room(self, room_id, user_id):
        """Check if the user was in the room at some point.
        Args:
            room_id(str): The room to check.
            user_id(str): The user to check.
        Raises:
            AuthError if the user was never in the room.
        Returns:
            A deferred membership event for the user if the user was in the
            room. This will be the join event if they are currently joined to
            the room. This will be the leave event if they have left the room.
        """
        member = yield self.state.get_current_state(
            room_id=room_id, event_type=EventTypes.Member, state_key=user_id
        )
        membership = member.membership if member else None

        if membership not in (Membership.JOIN, Membership.LEAVE):
            raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
        if membership == Membership.JOIN:
            return member

        if membership == Membership.LEAVE:
        # XXX this looks totally bogus. Why do we not allow users who have been banned,
        # or those who were members previously and have been re-invited?
        if allow_departed_users and membership == Membership.LEAVE:
            forgot = yield self.store.did_forget(user_id, room_id)
            if forgot:
                raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
            if not forgot:
                return member

        return member
        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
@@ -149,12 +149,6 @@ class Auth(object):
        latest_event_ids = yield self.store.is_host_joined(room_id, host)
        return latest_event_ids

    def _check_joined_room(self, member, user_id, room_id):
        if not member or member.membership != Membership.JOIN:
            raise AuthError(
                403, "User %s not in room %s (%s)" % (user_id, room_id, repr(member))
            )

    def can_federate(self, event, auth_events):
        creation_event = auth_events.get((EventTypes.Create, ""))

@@ -509,10 +503,7 @@ class Auth(object):
        return self.store.is_server_admin(user)

    def compute_auth_events(
        self,
        event,
        current_state_ids: Dict[Tuple[str, str], str],
        for_verification: bool = False,
        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
    ):
        """Given an event and current state return the list of event IDs used
        to auth an event.
@@ -561,7 +552,7 @@ class Auth(object):
            return True

        user_id = user.to_string()
        yield self.check_joined_room(room_id, user_id)
        yield self.check_user_in_room(room_id, user_id)

        # We currently require the user is a "moderator" in the room. We do this
        # by checking if they would (theoretically) be able to change the
@@ -634,10 +625,18 @@ class Auth(object):
        return query_params[0].decode("ascii")

    @defer.inlineCallbacks
    def check_in_room_or_world_readable(self, room_id, user_id):
    def check_user_in_room_or_world_readable(
        self, room_id: str, user_id: str, allow_departed_users: bool = False
    ):
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

        Args:
            room_id: room to check
            user_id: user to check
            allow_departed_users: if True, accept users that were previously
                members but have now departed

        Returns:
            Deferred[tuple[str, str|None]]: Resolves to the current membership of
                the user in the room and the membership event ID of the user. If
@@ -646,12 +645,14 @@ class Auth(object):
        """

        try:
            # check_user_was_in_room will return the most recent membership
            # check_user_in_room will return the most recent membership
            # event for the user if:
            #  * The user is a non-guest user, and was ever in the room
            #  * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_was_in_room(room_id, user_id)
            member_event = yield self.check_user_in_room(
                room_id, user_id, allow_departed_users=allow_departed_users
            )
            return member_event.membership, member_event.event_id
        except AuthError:
            visibility = yield self.state.get_current_state(
@@ -663,7 +664,9 @@ class Auth(object):
        ):
            return Membership.JOIN, None
        raise AuthError(
            403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
            403,
            "User %s not in room %s, and room previews are disabled"
            % (user_id, room_id),
        )

    @defer.inlineCallbacks

@@ -77,12 +77,11 @@ class EventTypes(object):
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"
    Encryption = "m.room.encryption"
    RelatedGroups = "m.room.related_groups"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    Encryption = "m.room.encryption"
    Encrypted = "m.room.encrypted"
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"

@@ -17,13 +17,15 @@
"""Contains exceptions and error codes."""

import logging
from typing import Dict
from typing import Dict, List

from six import iteritems
from six.moves import http_client

from canonicaljson import json

from twisted.web import http

logger = logging.getLogger(__name__)


@@ -80,6 +82,29 @@ class CodeMessageException(RuntimeError):
        self.msg = msg


class RedirectException(CodeMessageException):
    """A pseudo-error indicating that we want to redirect the client to a different
    location

    Attributes:
        cookies: a list of set-cookies values to add to the response. For example:
            b"sessionId=a3fWa; Expires=Wed, 21 Oct 2015 07:28:00 GMT"
    """

    def __init__(self, location: bytes, http_code: int = http.FOUND):
        """

        Args:
            location: the URI to redirect to
            http_code: the HTTP response code
        """
        msg = "Redirect to %s" % (location.decode("utf-8"),)
        super().__init__(code=http_code, msg=msg)
        self.location = location

        self.cookies = []  # type: List[bytes]


class SynapseError(CodeMessageException):
    """A base exception type for matrix errors which have an errcode and error
    message (as well as an HTTP status code).
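
The hunk above introduces `RedirectException`; a hypothetical usage sketch (the constructor and attribute shapes follow the class above, the values are made up):

```python
# Raised from request-handling code; the HTTP layer is expected to turn
# this into a redirect response carrying any attached cookies.
e = RedirectException(b"/_matrix/client/login", http_code=302)
e.cookies.append(b"session=a3fWa; Max-Age=3600; Path=/")
raise e
```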

@@ -158,12 +183,6 @@ class UserDeactivatedError(SynapseError):
        )


class RegistrationError(SynapseError):
    """An error raised when a registration event fails."""

    pass


class FederationDeniedError(SynapseError):
    """An error raised when the server tries to federate with a server which
    is not on its federation whitelist.
@@ -383,11 +402,9 @@ class UnsupportedRoomVersionError(SynapseError):
    """The client's request to create a room used a room version that the server does
    not support."""

    def __init__(self):
    def __init__(self, msg="Homeserver does not support this room version"):
        super(UnsupportedRoomVersionError, self).__init__(
            code=400,
            msg="Homeserver does not support this room version",
            errcode=Codes.UNSUPPORTED_ROOM_VERSION,
            code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION,
        )


@@ -15,6 +15,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List

from six import text_type

import jsonschema
@@ -293,7 +295,7 @@ class Filter(object):
            room_id = None
            ev_type = "m.presence"
            contains_url = False
            labels = []
            labels = []  # type: List[str]
        else:
            sender = event.get("sender", None)
            if not sender:

@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
from collections import OrderedDict
from typing import Any, Optional, Tuple

from synapse.api.errors import LimitExceededError

@@ -23,7 +24,9 @@ class Ratelimiter(object):
    """

    def __init__(self):
        self.message_counts = collections.OrderedDict()
        self.message_counts = (
            OrderedDict()
        )  # type: OrderedDict[Any, Tuple[float, int, Optional[float]]]

    def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
        """Can the entity (e.g. user or IP address) perform the action?

@@ -57,6 +57,9 @@ class RoomVersion(object):
    state_res = attr.ib()  # int; one of the StateResolutionVersions
    enforce_key_validity = attr.ib()  # bool

    # bool: before MSC2260, anyone was allowed to send an aliases event
    special_case_aliases_auth = attr.ib(type=bool, default=False)


class RoomVersions(object):
    V1 = RoomVersion(
@@ -65,6 +68,7 @@ class RoomVersions(object):
        EventFormatVersions.V1,
        StateResolutionVersions.V1,
        enforce_key_validity=False,
        special_case_aliases_auth=True,
    )
    V2 = RoomVersion(
        "2",
@@ -72,6 +76,7 @@ class RoomVersions(object):
        EventFormatVersions.V1,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,
    )
    V3 = RoomVersion(
        "3",
@@ -79,6 +84,7 @@ class RoomVersions(object):
        EventFormatVersions.V2,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,
    )
    V4 = RoomVersion(
        "4",
@@ -86,6 +92,7 @@ class RoomVersions(object):
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,
    )
    V5 = RoomVersion(
        "5",
@@ -93,6 +100,14 @@ class RoomVersions(object):
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=True,
    )
    MSC2260_DEV = RoomVersion(
        "org.matrix.msc2260",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
    )


@@ -104,5 +119,6 @@ KNOWN_ROOM_VERSIONS = {
        RoomVersions.V3,
        RoomVersions.V4,
        RoomVersions.V5,
        RoomVersions.MSC2260_DEV,
    )
}  # type: Dict[str, RoomVersion]
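
A hedged sketch of how these pieces fit together with the `check_from_context` and `UnsupportedRoomVersionError` changes elsewhere in this set (illustrative, not code from the diff):

```python
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

def resolve_room_version(room_version: str):
    # Map the room-version string carried on rooms/events to its RoomVersion
    # object; unknown versions are rejected up front.
    room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version)
    if room_version_obj is None:
        raise UnsupportedRoomVersionError()
    return room_version_obj
```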

@@ -84,8 +84,7 @@ class AdminCmdServer(HomeServer):


class AdminCmdReplicationHandler(ReplicationClientHandler):
    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
    async def on_rdata(self, stream_name, token, rows):
        pass

    def get_streams_to_replicate(self):

@@ -115,9 +115,8 @@ class ASReplicationHandler(ReplicationClientHandler):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
    async def on_rdata(self, stream_name, token, rows):
        await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()

@@ -57,11 +57,15 @@ from synapse.rest.client.v1.room import (
    RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import groups
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -85,6 +89,7 @@ class ClientReaderSlavedStore(
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass
@@ -120,6 +125,8 @@ class ClientReaderServer(HomeServer):
        PushRuleRestServlet(self).register(resource)
        VersionsRestServlet(self).register(resource)

        groups.register_servlets(self, resource)

        resources.update({"/_matrix/client": resource})

        root_resource = create_resource_tree(resources, NoResource())

@@ -56,6 +56,9 @@ from synapse.rest.client.v1.room import (
    RoomStateEventRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
@@ -81,6 +84,7 @@ class EventCreatorSlavedStore(
    SlavedEventStore,
    SlavedRegistrationStore,
    RoomStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass

@@ -33,8 +33,10 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
@@ -46,6 +48,9 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -63,9 +68,12 @@ class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    SlavedRegistrationStore,
    SlavedGroupServerStore,
    SlavedDeviceStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass

@@ -38,7 +38,11 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import ReceiptsStream
from synapse.replication.tcp.streams._base import (
    DeviceListsStream,
    ReceiptsStream,
    ToDeviceStream,
)
from synapse.server import HomeServer
from synapse.storage.database import Database
from synapse.types import ReadReceipt
@@ -145,9 +149,8 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(FederationSenderReplicationHandler, self).on_rdata(
    async def on_rdata(self, stream_name, token, rows):
        await super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)
@@ -159,6 +162,13 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
        args.update(self.send_handler.stream_positions())
        return args

    def on_remote_server_up(self, server: str):
        """Called when we get a new REMOTE_SERVER_UP command."""

        # Let's wake up the transaction queue for the server in case we have
        # pending stuff to send to it.
        self.send_handler.wake_destination(server)


def start(config_options):
    try:
@@ -206,7 +216,7 @@ class FederationSenderHandler(object):
    to the federation sender.
    """

    def __init__(self, hs, replication_client):
    def __init__(self, hs: FederationSenderServer, replication_client):
        self.store = hs.get_datastore()
        self._is_mine_id = hs.is_mine_id
        self.federation_sender = hs.get_federation_sender()
@@ -227,6 +237,9 @@ class FederationSenderHandler(object):
            self.store.get_room_max_stream_ordering()
        )

    def wake_destination(self, server: str):
        self.federation_sender.wake_destination(server)

    def stream_positions(self):
        return {"federation": self.federation_position}

@@ -247,6 +260,20 @@ class FederationSenderHandler(object):
                "process_receipts_for_federation", self._on_new_receipts, rows
            )

        # ... as well as device updates and messages
        elif stream_name == DeviceListsStream.NAME:
            hosts = set(row.destination for row in rows)
            for host in hosts:
                self.federation_sender.send_device_messages(host)

        elif stream_name == ToDeviceStream.NAME:
            # The to_device stream includes stuff to be pushed to both local
            # clients and remote servers, so we ignore entities that start with
            # '@' (since they'll be local users rather than destinations).
            hosts = set(row.entity for row in rows if not row.entity.startswith("@"))
            for host in hosts:
                self.federation_sender.send_device_messages(host)

    @defer.inlineCallbacks
    def _on_new_receipts(self, rows):
        """

@@ -31,7 +31,7 @@ from prometheus_client import Gauge
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.resource import EncodingResourceWrapper, IResource, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File

@@ -109,7 +109,16 @@ class SynapseHomeServer(HomeServer):
        for path, resmodule in additional_resources.items():
            handler_cls, config = load_module(resmodule)
            handler = handler_cls(config, module_api)
            resources[path] = AdditionalResource(self, handler.handle_request)
            if IResource.providedBy(handler):
                resource = handler
            elif hasattr(handler, "handle_request"):
                resource = AdditionalResource(self, handler.handle_request)
            else:
                raise ConfigError(
                    "additional_resource %s does not implement a known interface"
                    % (resmodule["module"],)
                )
            resources[path] = resource

        # try to find something useful to redirect '/' to
        if WEB_CLIENT_PREFIX in resources:
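
A hedged sketch of what this enables: a module configured under `additional_resources` may now expose a Twisted `IResource` directly instead of a `handle_request` method (hypothetical module, not from the diff):

```python
from twisted.web.resource import Resource

class MyStatusPage(Resource):
    """Served directly, since twisted's Resource provides IResource."""

    isLeaf = True

    def __init__(self, config, module_api):
        super().__init__()
        self._config = config

    def render_GET(self, request):
        return b"all good\n"
```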

@@ -34,6 +34,7 @@ from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.admin import register_servlets_for_media_repo
@@ -47,6 +48,7 @@ logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    RoomStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,

@@ -141,9 +141,8 @@ class PusherReplicationHandler(ReplicationClientHandler):

        self.pusher_pool = hs.get_pusherpool()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
    async def on_rdata(self, stream_name, token, rows):
        await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.poke_pushers, stream_name, token, rows)

    @defer.inlineCallbacks

@@ -54,6 +54,9 @@ from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.data_stores.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
@@ -77,6 +80,7 @@ class SynchrotronSlavedStore(
    SlavedEventStore,
    SlavedClientIpStore,
    RoomStore,
    MonthlyActiveUsersWorkerStore,
    BaseSlavedStore,
):
    pass
@@ -358,9 +362,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
        self.presence_handler = hs.get_presence_handler()
        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
    async def on_rdata(self, stream_name, token, rows):
        await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.process_and_notify, stream_name, token, rows)

    def get_streams_to_replicate(self):

@@ -172,9 +172,8 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(UserDirectoryReplicationHandler, self).on_rdata(
    async def on_rdata(self, stream_name, token, rows):
        await super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == EventsStream.NAME:
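
Each worker's replication handler receives the same mechanical conversion; in condensed, hypothetical form the new shape is:

```python
class ExampleReplicationHandler(ReplicationClientHandler):
    async def on_rdata(self, stream_name, token, rows):
        # on_rdata is now a coroutine: the base implementation is awaited
        # instead of yielded from an @defer.inlineCallbacks generator.
        await super().on_rdata(stream_name, token, rows)
        # ... then react to the new rows for the given stream ...
```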

@@ -53,6 +53,18 @@ Missing mandatory `server_name` config option.
"""


CONFIG_FILE_HEADER = """\
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html

"""


def path_exists(file_path):
    """Check if a file exists

@@ -344,7 +356,7 @@ class RootConfig(object):
            str: the yaml config file
        """

        return "\n\n".join(
        return CONFIG_FILE_HEADER + "\n\n".join(
            dedent(conf)
            for conf in self.invoke_all(
                "generate_config_section",
@@ -574,8 +586,8 @@ class RootConfig(object):
        if not path_exists(config_dir_path):
            os.makedirs(config_dir_path)
        with open(config_path, "w") as config_file:
            config_file.write("# vim:ft=yaml\n\n")
            config_file.write(config_str)
            config_file.write("\n\n# vim:ft=yaml")

        config_dict = yaml.safe_load(config_str)
        obj.generate_missing_files(config_dict, config_dir_path)

@@ -37,10 +37,12 @@ class EmailConfig(Config):

        self.email_enable_notifs = False

        email_config = config.get("email", {})
        email_config = config.get("email")
        if email_config is None:
            email_config = {}

        self.email_smtp_host = email_config.get("smtp_host", None)
        self.email_smtp_port = email_config.get("smtp_port", None)
        self.email_smtp_host = email_config.get("smtp_host", "localhost")
        self.email_smtp_port = email_config.get("smtp_port", 25)
        self.email_smtp_user = email_config.get("smtp_user", None)
        self.email_smtp_pass = email_config.get("smtp_pass", None)
        self.require_transport_security = email_config.get(
@@ -74,9 +76,9 @@ class EmailConfig(Config):
        self.email_template_dir = os.path.abspath(template_dir)

        self.email_enable_notifs = email_config.get("enable_notifs", False)
        account_validity_renewal_enabled = config.get("account_validity", {}).get(
            "renew_at"
        )

        account_validity_config = config.get("account_validity") or {}
        account_validity_renewal_enabled = account_validity_config.get("renew_at")

        self.threepid_behaviour_email = (
            # Have Synapse handle the email sending if account_threepid_delegates.email
@@ -278,7 +280,9 @@ class EmailConfig(Config):
        self.email_notif_for_new_users = email_config.get(
            "notif_for_new_users", True
        )
        self.email_riot_base_url = email_config.get("riot_base_url", None)
        self.email_riot_base_url = email_config.get(
            "client_base_url", email_config.get("riot_base_url", None)
        )

        if account_validity_renewal_enabled:
            self.email_expiry_template_html = email_config.get(
@@ -294,107 +298,111 @@ class EmailConfig(Config):
            raise ConfigError("Unable to find email template file %s" % (p,))

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
|
||||
# Enable sending emails for password resets, notification events or
|
||||
# account expiry notices
|
||||
return """\
|
||||
# Configuration for sending emails from Synapse.
|
||||
#
|
||||
# If your SMTP server requires authentication, the optional smtp_user &
|
||||
# smtp_pass variables should be used
|
||||
#
|
||||
#email:
|
||||
# enable_notifs: false
|
||||
# smtp_host: "localhost"
|
||||
# smtp_port: 25 # SSL: 465, STARTTLS: 587
|
||||
# smtp_user: "exampleusername"
|
||||
# smtp_pass: "examplepassword"
|
||||
# require_transport_security: false
|
||||
#
|
||||
# # notif_from defines the "From" address to use when sending emails.
|
||||
# # It must be set if email sending is enabled.
|
||||
# #
|
||||
# # The placeholder '%(app)s' will be replaced by the application name,
|
||||
# # which is normally 'app_name' (below), but may be overridden by the
|
||||
# # Matrix client application.
|
||||
# #
|
||||
# # Note that the placeholder must be written '%(app)s', including the
|
||||
# # trailing 's'.
|
||||
# #
|
||||
# notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
|
||||
#
|
||||
# # app_name defines the default value for '%(app)s' in notif_from. It
|
||||
# # defaults to 'Matrix'.
|
||||
# #
|
||||
# #app_name: my_branded_matrix_server
|
||||
#
|
||||
# # Enable email notifications by default
|
||||
# #
|
||||
# notif_for_new_users: true
|
||||
#
|
||||
# # Defining a custom URL for Riot is only needed if email notifications
|
||||
# # should contain links to a self-hosted installation of Riot; when set
|
||||
# # the "app_name" setting is ignored
|
||||
# #
|
||||
# riot_base_url: "http://localhost/riot"
|
||||
#
|
||||
# # Configure the time that a validation email or text message code
|
||||
# # will expire after sending
|
||||
# #
|
||||
# # This is currently used for password resets
|
||||
# #
|
||||
# #validation_token_lifetime: 1h
|
||||
#
|
||||
# # Template directory. All template files should be stored within this
|
||||
# # directory. If not set, default templates from within the Synapse
|
||||
# # package will be used
|
||||
# #
|
||||
# # For the list of default templates, please see
|
||||
# # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
|
||||
# #
|
||||
# #template_dir: res/templates
|
||||
#
|
||||
# # Templates for email notifications
|
||||
# #
|
||||
# notif_template_html: notif_mail.html
|
||||
# notif_template_text: notif_mail.txt
|
||||
#
|
||||
# # Templates for account expiry notices
|
||||
# #
|
||||
# expiry_template_html: notice_expiry.html
|
||||
# expiry_template_text: notice_expiry.txt
|
||||
#
|
||||
# # Templates for password reset emails sent by the homeserver
|
||||
# #
|
||||
# #password_reset_template_html: password_reset.html
|
||||
# #password_reset_template_text: password_reset.txt
|
||||
#
|
||||
# # Templates for registration emails sent by the homeserver
|
||||
# #
|
||||
# #registration_template_html: registration.html
|
||||
# #registration_template_text: registration.txt
|
||||
#
|
||||
# # Templates for validation emails sent by the homeserver when adding an email to
|
||||
# # your user account
|
||||
# #
|
||||
# #add_threepid_template_html: add_threepid.html
|
||||
# #add_threepid_template_text: add_threepid.txt
|
||||
#
|
||||
# # Templates for password reset success and failure pages that a user
|
||||
# # will see after attempting to reset their password
|
||||
# #
|
||||
# #password_reset_template_success_html: password_reset_success.html
|
||||
# #password_reset_template_failure_html: password_reset_failure.html
|
||||
#
|
||||
# # Templates for registration success and failure pages that a user
|
||||
# # will see after attempting to register using an email or phone
|
||||
# #
|
||||
# #registration_template_success_html: registration_success.html
|
||||
# #registration_template_failure_html: registration_failure.html
|
||||
#
|
||||
# # Templates for success and failure pages that a user will see after attempting
|
||||
# # to add an email or phone to their account
|
||||
# #
|
||||
# #add_threepid_success_html: add_threepid_success.html
|
||||
# #add_threepid_failure_html: add_threepid_failure.html
|
||||
email:
|
||||
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
|
||||
#
|
||||
#smtp_host: mail.server
|
||||
|
||||
# The port on the mail server for outgoing SMTP. Defaults to 25.
|
||||
#
|
||||
#smtp_port: 587
|
||||
|
||||
# Username/password for authentication to the SMTP server. By default, no
|
||||
# authentication is attempted.
|
||||
#
|
||||
# smtp_user: "exampleusername"
|
||||
# smtp_pass: "examplepassword"
|
||||
|
||||
# Uncomment the following to require TLS transport security for SMTP.
|
||||
# By default, Synapse will connect over plain text, and will then switch to
|
||||
# TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
|
||||
# Synapse will refuse to connect unless the server supports STARTTLS.
|
||||
#
|
||||
#require_transport_security: true
|
||||
|
||||
# Enable sending emails for messages that the user has missed
|
||||
#
|
||||
#enable_notifs: false
|
||||
|
||||
# notif_from defines the "From" address to use when sending emails.
|
||||
# It must be set if email sending is enabled.
|
||||
#
|
||||
# The placeholder '%(app)s' will be replaced by the application name,
|
||||
# which is normally 'app_name' (below), but may be overridden by the
|
||||
# Matrix client application.
|
||||
#
|
||||
# Note that the placeholder must be written '%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
#notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
|
||||
|
||||
# app_name defines the default value for '%(app)s' in notif_from. It
|
||||
# defaults to 'Matrix'.
|
||||
#
|
||||
#app_name: my_branded_matrix_server
|
||||
|
||||
# Uncomment the following to disable automatic subscription to email
|
||||
# notifications for new users. Enabled by default.
|
||||
#
|
||||
#notif_for_new_users: false
|
||||
|
||||
# Custom URL for client links within the email notifications. By default
|
||||
# links will be based on "https://matrix.to".
|
||||
#
|
||||
# (This setting used to be called riot_base_url; the old name is still
|
||||
# supported for backwards-compatibility but is now deprecated.)
|
||||
#
|
||||
#client_base_url: "http://localhost/riot"
|
||||
|
||||
# Configure the time that a validation email will expire after sending.
|
||||
# Defaults to 1h.
|
||||
#
|
||||
#validation_token_lifetime: 15m
|
||||
|
||||
# Directory in which Synapse will try to find the template files below.
|
||||
# If not set, default templates from within the Synapse package will be used.
|
||||
#
|
||||
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
|
||||
# If you *do* uncomment it, you will need to make sure that all the templates
|
||||
# below are in the directory.
|
||||
#
|
||||
# Synapse will look for the following templates in this directory:
|
||||
#
|
||||
# * The contents of email notifications of missed events: 'notif_mail.html' and
|
||||
# 'notif_mail.txt'.
|
||||
#
|
||||
# * The contents of account expiry notice emails: 'notice_expiry.html' and
|
||||
# 'notice_expiry.txt'.
|
||||
#
|
||||
# * The contents of password reset emails sent by the homeserver:
|
||||
# 'password_reset.html' and 'password_reset.txt'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they follow
|
||||
# the link in the password reset email: 'password_reset_success.html' and
|
||||
# 'password_reset_failure.html'
|
||||
#
|
||||
# * The contents of address verification emails sent during registration:
|
||||
# 'registration.html' and 'registration.txt'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they follow
|
||||
# the link in an address verification email sent during registration:
|
||||
# 'registration_success.html' and 'registration_failure.html'
|
||||
#
|
||||
# * The contents of address verification emails sent when an address is added
|
||||
# to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
|
||||
#
|
||||
# * HTML pages for success and failure that a user will see when they follow
|
||||
# the link in an address verification email sent when an address is added
|
||||
# to a Matrix account: 'add_threepid_success.html' and
|
||||
# 'add_threepid_failure.html'
|
||||
#
|
||||
# You can see the default templates at:
|
||||
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
"""
|
||||
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ class PushConfig(Config):
|
||||
|
||||
# Now check for the one in the 'email' section and honour it,
|
||||
# with a warning.
|
||||
push_config = config.get("email", {})
|
||||
push_config = config.get("email") or {}
|
||||
redact_content = push_config.get("redact_content")
|
||||
if redact_content is not None:
|
||||
print(
|
||||
|
||||
@@ -27,6 +27,9 @@ class AccountValidityConfig(Config):
|
||||
section = "accountvalidity"
|
||||
|
||||
def __init__(self, config, synapse_config):
|
||||
if config is None:
|
||||
return
|
||||
super(AccountValidityConfig, self).__init__()
|
||||
self.enabled = config.get("enabled", False)
|
||||
self.renew_by_email_enabled = "renew_at" in config
|
||||
|
||||
@@ -91,7 +94,7 @@ class RegistrationConfig(Config):
|
||||
)
|
||||
|
||||
self.account_validity = AccountValidityConfig(
|
||||
config.get("account_validity", {}), config
|
||||
config.get("account_validity") or {}, config
|
||||
)
|
||||
|
||||
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
|
||||
@@ -159,23 +162,6 @@ class RegistrationConfig(Config):
|
||||
# Optional account validity configuration. This allows for accounts to be denied
|
||||
# any request after a given period.
|
||||
#
|
||||
# ``enabled`` defines whether the account validity feature is enabled. Defaults
|
||||
# to False.
|
||||
#
|
||||
# ``period`` allows setting the period after which an account is valid
|
||||
# after its registration. When renewing the account, its validity period
|
||||
# will be extended by this amount of time. This parameter is required when using
|
||||
# the account validity feature.
|
||||
#
|
||||
# ``renew_at`` is the amount of time before an account's expiry date at which
|
||||
# Synapse will send an email to the account's email address with a renewal link.
|
||||
# This needs the ``email`` and ``public_baseurl`` configuration sections to be
|
||||
# filled.
|
||||
#
|
||||
# ``renew_email_subject`` is the subject of the email sent out with the renewal
|
||||
# link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
|
||||
# from the ``email`` section.
|
||||
#
|
||||
# Once this feature is enabled, Synapse will look for registered users without an
|
||||
# expiration date at startup and will add one to every account it found using the
|
||||
# current settings at that time.
|
||||
@@ -186,21 +172,55 @@ class RegistrationConfig(Config):
|
||||
# date will be randomly selected within a range [now + period - d ; now + period],
|
||||
# where d is equal to 10%% of the validity period.
|
||||
#
|
||||
#account_validity:
|
||||
# enabled: true
|
||||
# period: 6w
|
||||
# renew_at: 1w
|
||||
# renew_email_subject: "Renew your %%(app)s account"
|
||||
# # Directory in which Synapse will try to find the HTML files to serve to the
|
||||
# # user when trying to renew an account. Optional, defaults to
|
||||
# # synapse/res/templates.
|
||||
# template_dir: "res/templates"
|
||||
# # HTML to be displayed to the user after they successfully renewed their
|
||||
# # account. Optional.
|
||||
# account_renewed_html_path: "account_renewed.html"
|
||||
# # HTML to be displayed when the user tries to renew an account with an invalid
|
||||
# # renewal token. Optional.
|
||||
# invalid_token_html_path: "invalid_token.html"
|
||||
account_validity:
|
||||
# The account validity feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# The period after which an account is valid after its registration. When
|
||||
# renewing the account, its validity period will be extended by this amount
|
||||
# of time. This parameter is required when using the account validity
|
||||
# feature.
|
||||
#
|
||||
#period: 6w
|
||||
|
||||
# The amount of time before an account's expiry date at which Synapse will
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
# The subject of the email sent out with the renewal link. '%%(app)s' can be
|
||||
# used as a placeholder for the 'app_name' parameter from the 'email'
|
||||
# section.
|
||||
#
|
||||
# Note that the placeholder must be written '%%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
# If this is not set, a default value is used.
|
||||
#
|
||||
#renew_email_subject: "Renew your %%(app)s account"
|
||||
|
||||
# Directory in which Synapse will try to find templates for the HTML files to
|
||||
# serve to the user when trying to renew an account. If not set, default
|
||||
# templates from within the Synapse package will be used.
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed to the user after
|
||||
# they successfully renewed their account. If not set, default text is used.
|
||||
#
|
||||
#account_renewed_html_path: "account_renewed.html"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed when the user
|
||||
# tries to renew an account with an invalid renewal token. If not set,
|
||||
# default text is used.
|
||||
#
|
||||
#invalid_token_html_path: "invalid_token.html"
|
||||
|
||||
# Time that a user's session remains valid for, after they log in.
|
||||
#
|
||||
|
||||
@@ -121,6 +121,7 @@ class SAML2Config(Config):
|
||||
required_methods = [
|
||||
"get_saml_attributes",
|
||||
"saml_response_to_user_attributes",
|
||||
"get_remote_user_id",
|
||||
]
|
||||
missing_methods = [
|
||||
method
|
||||
|
||||
@@ -294,6 +294,14 @@ class ServerConfig(Config):
|
||||
self.retention_default_min_lifetime = None
|
||||
self.retention_default_max_lifetime = None
|
||||
|
||||
if self.retention_enabled:
|
||||
logger.info(
|
||||
"Message retention policies support enabled with the following default"
|
||||
" policy: min_lifetime = %s ; max_lifetime = %s",
|
||||
self.retention_default_min_lifetime,
|
||||
self.retention_default_max_lifetime,
|
||||
)
|
||||
|
||||
self.retention_allowed_lifetime_min = retention_config.get(
|
||||
"allowed_lifetime_min"
|
||||
)
|
||||
@@ -948,17 +956,17 @@ class ServerConfig(Config):
|
||||
#
|
||||
# The rationale for this per-job configuration is that some rooms might have a
|
||||
# retention policy with a low 'max_lifetime', where history needs to be purged
|
||||
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
|
||||
# that purge to be performed by a job that's iterating over every room it knows,
|
||||
# which would be quite heavy on the server.
|
||||
# of outdated messages on a more frequent basis than for the rest of the rooms
|
||||
# (e.g. every 12h), but not want that purge to be performed by a job that's
|
||||
# iterating over every room it knows, which could be heavy on the server.
|
||||
#
|
||||
#purge_jobs:
|
||||
# - shortest_max_lifetime: 1d
|
||||
# longest_max_lifetime: 3d
|
||||
# interval: 5m:
|
||||
# interval: 12h
|
||||
# - shortest_max_lifetime: 3d
|
||||
# longest_max_lifetime: 1y
|
||||
# interval: 24h
|
||||
# interval: 1d
|
||||
"""
|
||||
% locals()
|
||||
)
|
||||
|
||||
@@ -32,6 +32,17 @@ from synapse.util import glob_to_regex
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ACME_SUPPORT_ENABLED_WARN = """\
|
||||
This server uses Synapse's built-in ACME support. Note that ACME v1 has been
|
||||
deprecated by Let's Encrypt, and that Synapse doesn't currently support ACME v2,
|
||||
which means that this feature will not work with Synapse installs set up after
|
||||
November 2019, and that it may stop working on June 2020 for installs set up
|
||||
before that date.
|
||||
|
||||
For more info and alternative solutions, see
|
||||
https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
|
||||
--------------------------------------------------------------------------------"""
|
||||
|
||||
|
||||
class TlsConfig(Config):
|
||||
section = "tls"
|
||||
@@ -44,6 +55,9 @@ class TlsConfig(Config):
|
||||
|
||||
self.acme_enabled = acme_config.get("enabled", False)
|
||||
|
||||
if self.acme_enabled:
|
||||
logger.warning(ACME_SUPPORT_ENABLED_WARN)
|
||||
|
||||
# hyperlink complains on py2 if this is not a Unicode
|
||||
self.acme_url = six.text_type(
|
||||
acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory")
|
||||
@@ -109,6 +123,8 @@ class TlsConfig(Config):
|
||||
fed_whitelist_entries = config.get(
|
||||
"federation_certificate_verification_whitelist", []
|
||||
)
|
||||
if fed_whitelist_entries is None:
|
||||
fed_whitelist_entries = []
|
||||
|
||||
# Support globs (*) in whitelist values
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[str]
|
||||
@@ -360,6 +376,11 @@ class TlsConfig(Config):
|
||||
# ACME support: This will configure Synapse to request a valid TLS certificate
|
||||
# for your configured `server_name` via Let's Encrypt.
|
||||
#
|
||||
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
|
||||
# ACME v2. This means that this feature currently won't work with installs set
|
||||
# up after November 2019. For more info, and alternative solutions, see
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
|
||||
#
|
||||
# Note that provisioning a certificate in this way requires port 80 to be
|
||||
# routed to Synapse so that it can complete the http-01 ACME challenge.
|
||||
# By default, if you enable ACME support, Synapse will attempt to listen on
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,13 +18,17 @@
|
||||
import collections.abc
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import Dict
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.sign import sign_json
|
||||
from signedjson.types import SigningKey
|
||||
from unpaddedbase64 import decode_base64, encode_base64
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.api.room_versions import RoomVersion
|
||||
from synapse.events.utils import prune_event, prune_event_dict
|
||||
from synapse.types import JsonDict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -112,18 +117,28 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
|
||||
return hashed.name, hashed.digest()
|
||||
|
||||
|
||||
def compute_event_signature(event_dict, signature_name, signing_key):
|
||||
def compute_event_signature(
|
||||
room_version: RoomVersion,
|
||||
event_dict: JsonDict,
|
||||
signature_name: str,
|
||||
signing_key: SigningKey,
|
||||
) -> Dict[str, Dict[str, str]]:
|
||||
"""Compute the signature of the event for the given name and key.
|
||||
|
||||
Args:
|
||||
event_dict (dict): The event as a dict
|
||||
signature_name (str): The name of the entity signing the event
|
||||
room_version: the version of the room that this event is in.
|
||||
(the room version determines the redaction algorithm and hence the
|
||||
json to be signed)
|
||||
|
||||
event_dict: The event as a dict
|
||||
|
||||
signature_name: The name of the entity signing the event
|
||||
(typically the server's hostname).
|
||||
signing_key (syutil.crypto.SigningKey): The key to sign with
|
||||
|
||||
signing_key: The key to sign with
|
||||
|
||||
Returns:
|
||||
dict[str, dict[str, str]]: Returns a dictionary in the same format of
|
||||
an event's signatures field.
|
||||
a dictionary in the same format of an event's signatures field.
|
||||
"""
|
||||
redact_json = prune_event_dict(event_dict)
|
||||
redact_json.pop("age_ts", None)
|
||||
@@ -137,23 +152,26 @@ def compute_event_signature(event_dict, signature_name, signing_key):
|
||||
|
||||
|
||||
def add_hashes_and_signatures(
|
||||
event_dict, signature_name, signing_key, hash_algorithm=hashlib.sha256
|
||||
room_version: RoomVersion,
|
||||
event_dict: JsonDict,
|
||||
signature_name: str,
|
||||
signing_key: SigningKey,
|
||||
):
|
||||
"""Add content hash and sign the event
|
||||
|
||||
Args:
|
||||
event_dict (dict): The event to add hashes to and sign
|
||||
signature_name (str): The name of the entity signing the event
|
||||
room_version: the version of the room this event is in
|
||||
|
||||
event_dict: The event to add hashes to and sign
|
||||
signature_name: The name of the entity signing the event
|
||||
(typically the server's hostname).
|
||||
signing_key (syutil.crypto.SigningKey): The key to sign with
|
||||
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
|
||||
to hash the event
|
||||
signing_key: The key to sign with
|
||||
"""
|
||||
|
||||
name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm)
|
||||
name, digest = compute_content_hash(event_dict, hash_algorithm=hashlib.sha256)
|
||||
|
||||
event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
|
||||
|
||||
event_dict["signatures"] = compute_event_signature(
|
||||
event_dict, signature_name=signature_name, signing_key=signing_key
|
||||
room_version, event_dict, signature_name=signature_name, signing_key=signing_key
|
||||
)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -23,17 +24,27 @@ from unpaddedbase64 import decode_base64
|
||||
|
||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||
from synapse.api.errors import AuthError, EventSizeError, SynapseError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
RoomVersion,
|
||||
)
|
||||
from synapse.types import UserID, get_domain_from_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
|
||||
def check(
|
||||
room_version_obj: RoomVersion,
|
||||
event,
|
||||
auth_events,
|
||||
do_sig_check=True,
|
||||
do_size_check=True,
|
||||
):
|
||||
""" Checks if this event is correctly authed.
|
||||
|
||||
Args:
|
||||
room_version (str): the version of the room
|
||||
room_version_obj: the version of the room
|
||||
event: the event being checked.
|
||||
auth_events (dict: event-key -> event): the existing room state.
|
||||
|
||||
@@ -89,7 +100,12 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
if not event.signatures.get(event_id_domain):
|
||||
raise AuthError(403, "Event not signed by sending server")
|
||||
|
||||
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
|
||||
#
|
||||
# 1. If type is m.room.create:
|
||||
if event.type == EventTypes.Create:
|
||||
# 1b. If the domain of the room_id does not match the domain of the sender,
|
||||
# reject.
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
room_id_domain = get_domain_from_id(event.room_id)
|
||||
if room_id_domain != sender_domain:
|
||||
@@ -97,39 +113,49 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
403, "Creation event's room_id domain does not match sender's"
|
||||
)
|
||||
|
||||
room_version = event.content.get("room_version", "1")
|
||||
if room_version not in KNOWN_ROOM_VERSIONS:
|
||||
# 1c. If content.room_version is present and is not a recognised version, reject
|
||||
room_version_prop = event.content.get("room_version", "1")
|
||||
if room_version_prop not in KNOWN_ROOM_VERSIONS:
|
||||
raise AuthError(
|
||||
403, "room appears to have unsupported version %s" % (room_version,)
|
||||
403,
|
||||
"room appears to have unsupported version %s" % (room_version_prop,),
|
||||
)
|
||||
# FIXME
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
# 3. If event does not have a m.room.create in its auth_events, reject.
|
||||
creation_event = auth_events.get((EventTypes.Create, ""), None)
|
||||
|
||||
if not creation_event:
|
||||
raise AuthError(403, "No create event in auth events")
|
||||
|
||||
# additional check for m.federate
|
||||
creating_domain = get_domain_from_id(event.room_id)
|
||||
originating_domain = get_domain_from_id(event.sender)
|
||||
if creating_domain != originating_domain:
|
||||
if not _can_federate(event, auth_events):
|
||||
raise AuthError(403, "This room has been marked as unfederatable.")
|
||||
|
||||
# FIXME: Temp hack
|
||||
# 4. If type is m.room.aliases
|
||||
if event.type == EventTypes.Aliases:
|
||||
# 4a. If event has no state_key, reject
|
||||
if not event.is_state():
|
||||
raise AuthError(403, "Alias event must be a state event")
|
||||
if not event.state_key:
|
||||
raise AuthError(403, "Alias event must have non-empty state_key")
|
||||
|
||||
# 4b. If sender's domain doesn't matches [sic] state_key, reject
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
if event.state_key != sender_domain:
|
||||
raise AuthError(
|
||||
403, "Alias event's state_key does not match sender's domain"
|
||||
)
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
# 4c. Otherwise, allow.
|
||||
# This is removed by https://github.com/matrix-org/matrix-doc/pull/2260
|
||||
if room_version_obj.special_case_aliases_auth:
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
|
||||
@@ -160,7 +186,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
|
||||
_check_power_levels(event, auth_events)
|
||||
|
||||
if event.type == EventTypes.Redaction:
|
||||
check_redaction(room_version, event, auth_events)
|
||||
check_redaction(room_version_obj, event, auth_events)
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
|
||||
@@ -386,7 +412,7 @@ def _can_send_event(event, auth_events):
|
||||
return True
|
||||
|
||||
|
||||
def check_redaction(room_version, event, auth_events):
|
||||
def check_redaction(room_version_obj: RoomVersion, event, auth_events):
|
||||
"""Check whether the event sender is allowed to redact the target event.
|
||||
|
||||
Returns:
|
||||
@@ -406,11 +432,7 @@ def check_redaction(room_version, event, auth_events):
|
||||
if user_level >= redact_level:
|
||||
return False
|
||||
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
if not v:
|
||||
raise RuntimeError("Unrecognized room version %r" % (room_version,))
|
||||
|
||||
if v.event_format == EventFormatVersions.V1:
|
||||
if room_version_obj.event_format == EventFormatVersions.V1:
|
||||
redacter_domain = get_domain_from_id(event.event_id)
|
||||
redactee_domain = get_domain_from_id(event.redacts)
|
||||
if redacter_domain == redactee_domain:
|
||||
@@ -634,7 +656,7 @@ def get_public_keys(invite_event):
|
||||
return public_keys
|
||||
|
||||
|
||||
def auth_types_for_event(event) -> Set[Tuple[str]]:
|
||||
def auth_types_for_event(event) -> Set[Tuple[str, str]]:
|
||||
"""Given an event, return a list of (EventType, StateKey) that may be
|
||||
needed to auth the event. The returned list may be a superset of what
|
||||
would actually be required depending on the full state of the room.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2019 New Vector Ltd
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -16,13 +17,14 @@
|
||||
|
||||
import os
|
||||
from distutils.util import strtobool
|
||||
from typing import Optional, Type
|
||||
|
||||
import six
|
||||
|
||||
from unpaddedbase64 import encode_base64
|
||||
|
||||
from synapse.api.errors import UnsupportedRoomVersionError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
|
||||
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.caches import intern_dict
|
||||
from synapse.util.frozenutils import freeze
|
||||
|
||||
@@ -36,34 +38,115 @@ from synapse.util.frozenutils import freeze
|
||||
USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
|
||||
|
||||
|
||||
class DictProperty:
|
||||
"""An object property which delegates to the `_dict` within its parent object."""
|
||||
|
||||
__slots__ = ["key"]
|
||||
|
||||
def __init__(self, key: str):
|
||||
self.key = key
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
# if the property is accessed as a class property rather than an instance
|
||||
# property, return the property itself rather than the value
|
||||
if instance is None:
|
||||
return self
|
||||
try:
|
||||
return instance._dict[self.key]
|
||||
except KeyError as e1:
|
||||
# We want this to look like a regular attribute error (mostly so that
|
||||
# hasattr() works correctly), so we convert the KeyError into an
|
||||
# AttributeError.
|
||||
#
|
||||
# To exclude the KeyError from the traceback, we explicitly
|
||||
# 'raise from e1.__context__' (which is better than 'raise from None',
|
||||
# becuase that would omit any *earlier* exceptions).
|
||||
#
|
||||
raise AttributeError(
|
||||
"'%s' has no '%s' property" % (type(instance), self.key)
|
||||
) from e1.__context__
|
||||
|
||||
def __set__(self, instance, v):
|
||||
instance._dict[self.key] = v
|
||||
|
||||
def __delete__(self, instance):
|
||||
try:
|
||||
del instance._dict[self.key]
|
||||
except KeyError as e1:
|
||||
raise AttributeError(
|
||||
"'%s' has no '%s' property" % (type(instance), self.key)
|
||||
) from e1.__context__
|
||||
|
||||
|
||||
class DefaultDictProperty(DictProperty):
|
||||
"""An extension of DictProperty which provides a default if the property is
|
||||
not present in the parent's _dict.
|
||||
|
||||
Note that this means that hasattr() on the property always returns True.
|
||||
"""
|
||||
|
||||
__slots__ = ["default"]
|
||||
|
||||
def __init__(self, key, default):
|
||||
super().__init__(key)
|
||||
self.default = default
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
if instance is None:
|
||||
return self
|
||||
return instance._dict.get(self.key, self.default)
|
||||
|
||||
|
||||
class _EventInternalMetadata(object):
|
||||
def __init__(self, internal_metadata_dict):
|
||||
self.__dict__ = dict(internal_metadata_dict)
|
||||
__slots__ = ["_dict"]
|
||||
|
||||
def get_dict(self):
|
||||
return dict(self.__dict__)
|
||||
def __init__(self, internal_metadata_dict: JsonDict):
|
||||
# we have to copy the dict, because it turns out that the same dict is
|
||||
# reused. TODO: fix that
|
||||
self._dict = dict(internal_metadata_dict)
|
||||
|
||||
def is_outlier(self):
|
||||
return getattr(self, "outlier", False)
|
||||
outlier = DictProperty("outlier") # type: bool
|
||||
out_of_band_membership = DictProperty("out_of_band_membership") # type: bool
|
||||
send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str
|
||||
recheck_redaction = DictProperty("recheck_redaction") # type: bool
|
||||
soft_failed = DictProperty("soft_failed") # type: bool
|
||||
proactively_send = DictProperty("proactively_send") # type: bool
|
||||
redacted = DictProperty("redacted") # type: bool
|
||||
txn_id = DictProperty("txn_id") # type: str
|
||||
token_id = DictProperty("token_id") # type: str
|
||||
stream_ordering = DictProperty("stream_ordering") # type: int
|
||||
|
||||
def is_out_of_band_membership(self):
|
||||
# XXX: These are set by StreamWorkerStore._set_before_and_after.
|
||||
# I'm pretty sure that these are never persisted to the database, so shouldn't
|
||||
# be here
|
||||
before = DictProperty("before") # type: str
|
||||
after = DictProperty("after") # type: str
|
||||
order = DictProperty("order") # type: int
|
||||
|
||||
def get_dict(self) -> JsonDict:
|
||||
return dict(self._dict)
|
||||
|
||||
def is_outlier(self) -> bool:
|
||||
return self._dict.get("outlier", False)
|
||||
|
||||
def is_out_of_band_membership(self) -> bool:
|
||||
"""Whether this is an out of band membership, like an invite or an invite
|
||||
rejection. This is needed as those events are marked as outliers, but
|
||||
they still need to be processed as if they're new events (e.g. updating
|
||||
invite state in the database, relaying to clients, etc).
|
||||
"""
|
||||
return getattr(self, "out_of_band_membership", False)
|
||||
return self._dict.get("out_of_band_membership", False)
|
||||
|
||||
def get_send_on_behalf_of(self):
|
||||
def get_send_on_behalf_of(self) -> Optional[str]:
|
||||
"""Whether this server should send the event on behalf of another server.
|
||||
This is used by the federation "send_join" API to forward the initial join
|
||||
event for a server in the room.
|
||||
|
||||
returns a str with the name of the server this event is sent on behalf of.
|
||||
"""
|
||||
return getattr(self, "send_on_behalf_of", None)
|
||||
return self._dict.get("send_on_behalf_of")
|
||||
|
||||
def need_to_check_redaction(self):
|
||||
def need_to_check_redaction(self) -> bool:
|
||||
"""Whether the redaction event needs to be rechecked when fetching
|
||||
from the database.
|
||||
|
||||
@@ -76,9 +159,9 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "recheck_redaction", False)
|
||||
return self._dict.get("recheck_redaction", False)
|
||||
|
||||
def is_soft_failed(self):
|
||||
def is_soft_failed(self) -> bool:
|
||||
"""Whether the event has been soft failed.
|
||||
|
||||
Soft failed events should be handled as usual, except:
|
||||
@@ -90,7 +173,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "soft_failed", False)
|
||||
return self._dict.get("soft_failed", False)
|
||||
|
||||
def should_proactively_send(self):
|
||||
"""Whether the event, if ours, should be sent to other clients and
|
||||
@@ -102,7 +185,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "proactively_send", True)
|
||||
return self._dict.get("proactively_send", True)
|
||||
|
||||
def is_redacted(self):
|
||||
"""Whether the event has been redacted.
|
||||
@@ -113,32 +196,7 @@ class _EventInternalMetadata(object):
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
return getattr(self, "redacted", False)
|
||||
|
||||
|
||||
def _event_dict_property(key):
|
||||
# We want to be able to use hasattr with the event dict properties.
|
||||
# However, (on python3) hasattr expects AttributeError to be raised. Hence,
|
||||
# we need to transform the KeyError into an AttributeError
|
||||
def getter(self):
|
||||
try:
|
||||
return self._event_dict[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def setter(self, v):
|
||||
try:
|
||||
self._event_dict[key] = v
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def delete(self):
|
||||
try:
|
||||
del self._event_dict[key]
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
return property(getter, setter, delete)
|
||||
return self._dict.get("redacted", False)
|
||||
|
||||
|
||||
class EventBase(object):
|
||||
@@ -154,21 +212,27 @@ class EventBase(object):
|
||||
self.unsigned = unsigned
|
||||
self.rejected_reason = rejected_reason
|
||||
|
||||
self._event_dict = event_dict
|
||||
self._dict = event_dict
|
||||
|
||||
self.internal_metadata = _EventInternalMetadata(internal_metadata_dict)
|
||||
|
||||
auth_events = _event_dict_property("auth_events")
|
||||
depth = _event_dict_property("depth")
|
||||
content = _event_dict_property("content")
|
||||
hashes = _event_dict_property("hashes")
|
||||
origin = _event_dict_property("origin")
|
||||
origin_server_ts = _event_dict_property("origin_server_ts")
|
||||
prev_events = _event_dict_property("prev_events")
|
||||
redacts = _event_dict_property("redacts")
|
||||
room_id = _event_dict_property("room_id")
|
||||
sender = _event_dict_property("sender")
|
||||
user_id = _event_dict_property("sender")
|
||||
auth_events = DictProperty("auth_events")
|
||||
depth = DictProperty("depth")
|
||||
content = DictProperty("content")
|
||||
hashes = DictProperty("hashes")
|
||||
origin = DictProperty("origin")
|
||||
origin_server_ts = DictProperty("origin_server_ts")
|
||||
prev_events = DictProperty("prev_events")
|
||||
redacts = DefaultDictProperty("redacts", None)
|
||||
room_id = DictProperty("room_id")
|
||||
sender = DictProperty("sender")
|
||||
state_key = DictProperty("state_key")
|
||||
type = DictProperty("type")
|
||||
user_id = DictProperty("sender")
|
||||
|
||||
@property
|
||||
def event_id(self) -> str:
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def membership(self):
|
||||
@@ -177,19 +241,19 @@ class EventBase(object):
|
||||
def is_state(self):
|
||||
return hasattr(self, "state_key") and self.state_key is not None
|
||||
|
||||
def get_dict(self):
|
||||
d = dict(self._event_dict)
|
||||
def get_dict(self) -> JsonDict:
|
||||
d = dict(self._dict)
|
||||
d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)})
|
||||
|
||||
return d
|
||||
|
||||
def get(self, key, default=None):
|
||||
return self._event_dict.get(key, default)
|
||||
return self._dict.get(key, default)
|
||||
|
||||
def get_internal_metadata_dict(self):
|
||||
return self.internal_metadata.get_dict()
|
||||
|
||||
def get_pdu_json(self, time_now=None):
|
||||
def get_pdu_json(self, time_now=None) -> JsonDict:
|
||||
pdu_json = self.get_dict()
|
||||
|
||||
if time_now is not None and "age_ts" in pdu_json["unsigned"]:
|
||||
@@ -206,16 +270,16 @@ class EventBase(object):
|
||||
raise AttributeError("Unrecognized attribute %s" % (instance,))
|
||||
|
||||
def __getitem__(self, field):
|
||||
return self._event_dict[field]
|
||||
return self._dict[field]
|
||||
|
||||
def __contains__(self, field):
|
||||
return field in self._event_dict
|
||||
return field in self._dict
|
||||
|
||||
def items(self):
|
||||
return list(self._event_dict.items())
|
||||
return list(self._dict.items())
|
||||
|
||||
def keys(self):
|
||||
return six.iterkeys(self._event_dict)
|
||||
return six.iterkeys(self._dict)
|
||||
|
||||
def prev_event_ids(self):
|
||||
"""Returns the list of prev event IDs. The order matches the order
|
||||
@@ -260,10 +324,7 @@ class FrozenEvent(EventBase):
|
||||
else:
|
||||
frozen_dict = event_dict
|
||||
|
||||
self.event_id = event_dict["event_id"]
|
||||
self.type = event_dict["type"]
|
||||
if "state_key" in event_dict:
|
||||
self.state_key = event_dict["state_key"]
|
||||
self._event_id = event_dict["event_id"]
|
||||
|
||||
super(FrozenEvent, self).__init__(
|
||||
frozen_dict,
|
||||
@@ -273,6 +334,10 @@ class FrozenEvent(EventBase):
|
||||
rejected_reason=rejected_reason,
|
||||
)
|
||||
|
||||
@property
|
||||
def event_id(self) -> str:
|
||||
return self._event_id
|
||||
|
||||
def __str__(self):
|
||||
return self.__repr__()
|
||||
|
||||
@@ -311,9 +376,6 @@ class FrozenEventV2(EventBase):
|
||||
frozen_dict = event_dict
|
||||
|
||||
self._event_id = None
|
||||
self.type = event_dict["type"]
|
||||
if "state_key" in event_dict:
|
||||
self.state_key = event_dict["state_key"]
|
||||
|
||||
super(FrozenEventV2, self).__init__(
|
||||
frozen_dict,
|
||||
@@ -383,28 +445,7 @@ class FrozenEventV3(FrozenEventV2):
|
||||
return self._event_id
|
||||
|
||||
|
||||
def room_version_to_event_format(room_version):
|
||||
"""Converts a room version string to the event format
|
||||
|
||||
Args:
|
||||
room_version (str)
|
||||
|
||||
Returns:
|
||||
int
|
||||
|
||||
Raises:
|
||||
UnsupportedRoomVersionError if the room version is unknown
|
||||
"""
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
|
||||
if not v:
|
||||
# this can happen if support is withdrawn for a room version
|
||||
raise UnsupportedRoomVersionError()
|
||||
|
||||
return v.event_format
|
||||
|
||||
|
||||
def event_type_from_format_version(format_version):
|
||||
def event_type_from_format_version(format_version: int) -> Type[EventBase]:
|
||||
"""Returns the python type to use to construct an Event object for the
|
||||
given event format version.
|
||||
|
||||
@@ -424,3 +465,14 @@ def event_type_from_format_version(format_version):
|
||||
return FrozenEventV3
|
||||
else:
|
||||
raise Exception("No event format %r" % (format_version,))
|
||||
|
||||
|
||||
def make_event_from_dict(
|
||||
event_dict: JsonDict,
|
||||
room_version: RoomVersion = RoomVersions.V1,
|
||||
internal_metadata_dict: JsonDict = {},
|
||||
rejected_reason: Optional[str] = None,
|
||||
) -> EventBase:
|
||||
"""Construct an EventBase from the given event dict"""
|
||||
event_type = event_type_from_format_version(room_version.event_format)
|
||||
return event_type(event_dict, internal_metadata_dict, rejected_reason)
|
||||
|
||||
@@ -12,8 +12,10 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from typing import Optional
|
||||
|
||||
import attr
|
||||
from nacl.signing import SigningKey
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
@@ -23,13 +25,14 @@ from synapse.api.room_versions import (
|
||||
KNOWN_EVENT_FORMAT_VERSIONS,
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
RoomVersion,
|
||||
)
|
||||
from synapse.crypto.event_signing import add_hashes_and_signatures
|
||||
from synapse.types import EventID
|
||||
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
|
||||
from synapse.types import EventID, JsonDict
|
||||
from synapse.util import Clock
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
from . import _EventInternalMetadata, event_type_from_format_version
|
||||
|
||||
|
||||
@attr.s(slots=True, cmp=False, frozen=True)
|
||||
class EventBuilder(object):
|
||||
@@ -40,7 +43,7 @@ class EventBuilder(object):
|
||||
content/unsigned/internal_metadata fields are still mutable)
|
||||
|
||||
Attributes:
|
||||
format_version (int): Event format version
|
||||
room_version: Version of the target room
|
||||
room_id (str)
|
||||
type (str)
|
||||
sender (str)
|
||||
@@ -63,7 +66,7 @@ class EventBuilder(object):
|
||||
_hostname = attr.ib()
|
||||
_signing_key = attr.ib()
|
||||
|
||||
format_version = attr.ib()
|
||||
room_version = attr.ib(type=RoomVersion)
|
||||
|
||||
room_id = attr.ib()
|
||||
type = attr.ib()
|
||||
@@ -108,7 +111,8 @@ class EventBuilder(object):
|
||||
)
|
||||
auth_ids = yield self._auth.compute_auth_events(self, state_ids)
|
||||
|
||||
if self.format_version == EventFormatVersions.V1:
|
||||
format_version = self.room_version.event_format
|
||||
if format_version == EventFormatVersions.V1:
|
||||
auth_events = yield self._store.add_event_hashes(auth_ids)
|
||||
prev_events = yield self._store.add_event_hashes(prev_event_ids)
|
||||
else:
|
||||
@@ -148,7 +152,7 @@ class EventBuilder(object):
|
||||
clock=self._clock,
|
||||
hostname=self._hostname,
|
||||
signing_key=self._signing_key,
|
||||
format_version=self.format_version,
|
||||
room_version=self.room_version,
|
||||
event_dict=event_dict,
|
||||
internal_metadata_dict=self.internal_metadata.get_dict(),
|
||||
)
|
||||
@@ -201,7 +205,7 @@ class EventBuilderFactory(object):
|
||||
clock=self.clock,
|
||||
hostname=self.hostname,
|
||||
signing_key=self.signing_key,
|
||||
format_version=room_version.event_format,
|
||||
room_version=room_version,
|
||||
type=key_values["type"],
|
||||
state_key=key_values.get("state_key"),
|
||||
room_id=key_values["room_id"],
|
||||
@@ -214,29 +218,19 @@ class EventBuilderFactory(object):
|
||||
|
||||
|
||||
def create_local_event_from_event_dict(
|
||||
clock,
|
||||
hostname,
|
||||
signing_key,
|
||||
format_version,
|
||||
event_dict,
|
||||
internal_metadata_dict=None,
|
||||
):
|
||||
clock: Clock,
|
||||
hostname: str,
|
||||
signing_key: SigningKey,
|
||||
room_version: RoomVersion,
|
||||
event_dict: JsonDict,
|
||||
internal_metadata_dict: Optional[JsonDict] = None,
|
||||
) -> EventBase:
|
||||
"""Takes a fully formed event dict, ensuring that fields like `origin`
|
||||
and `origin_server_ts` have correct values for a locally produced event,
|
||||
then signs and hashes it.
|
||||
|
||||
Args:
|
||||
clock (Clock)
|
||||
hostname (str)
|
||||
signing_key
|
||||
format_version (int)
|
||||
event_dict (dict)
|
||||
internal_metadata_dict (dict|None)
|
||||
|
||||
Returns:
|
||||
FrozenEvent
|
||||
"""
|
||||
|
||||
format_version = room_version.event_format
|
||||
if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
|
||||
raise Exception("No event format defined for version %r" % (format_version,))
|
||||
|
||||
@@ -257,9 +251,9 @@ def create_local_event_from_event_dict(
|
||||
|
||||
event_dict.setdefault("signatures", {})
|
||||
|
||||
add_hashes_and_signatures(event_dict, hostname, signing_key)
|
||||
return event_type_from_format_version(format_version)(
|
||||
event_dict, internal_metadata_dict=internal_metadata_dict
|
||||
add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
|
||||
return make_event_from_dict(
|
||||
event_dict, room_version, internal_metadata_dict=internal_metadata_dict
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from typing import Dict, Optional, Tuple, Union
|
||||
from typing import Optional, Union
|
||||
|
||||
from six import iteritems
|
||||
|
||||
@@ -23,6 +23,7 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.appservice import ApplicationService
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.types import StateMap
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
@@ -106,13 +107,11 @@ class EventContext:
|
||||
_state_group = attr.ib(default=None, type=Optional[int])
|
||||
state_group_before_event = attr.ib(default=None, type=Optional[int])
|
||||
prev_group = attr.ib(default=None, type=Optional[int])
|
||||
delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
|
||||
delta_ids = attr.ib(default=None, type=Optional[StateMap[str]])
|
||||
app_service = attr.ib(default=None, type=Optional[ApplicationService])
|
||||
|
||||
_current_state_ids = attr.ib(
|
||||
default=None, type=Optional[Dict[Tuple[str, str], str]]
|
||||
)
|
||||
_prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
|
||||
_current_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
|
||||
_prev_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
|
||||
|
||||
@staticmethod
|
||||
def with_state(
|
||||
|
||||
@@ -15,12 +15,17 @@
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
from typing import Dict
|
||||
|
||||
from synapse.spam_checker_api import SpamCheckerApi
|
||||
|
||||
MYPY = False
|
||||
if MYPY:
|
||||
import synapse.server
|
||||
|
||||
|
||||
class SpamChecker(object):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "synapse.server.HomeServer"):
|
||||
self.spam_checker = None
|
||||
|
||||
module = None
|
||||
@@ -40,7 +45,7 @@ class SpamChecker(object):
|
||||
else:
|
||||
self.spam_checker = module(config=config)
|
||||
|
||||
def check_event_for_spam(self, event):
|
||||
def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool:
|
||||
"""Checks if a given event is considered "spammy" by this server.
|
||||
|
||||
If the server considers an event spammy, then it will be rejected if
|
||||
@@ -48,26 +53,30 @@ class SpamChecker(object):
|
||||
users receive a blank event.
|
||||
|
||||
Args:
|
||||
event (synapse.events.EventBase): the event to be checked
|
||||
event: the event to be checked
|
||||
|
||||
Returns:
|
||||
bool: True if the event is spammy.
|
||||
True if the event is spammy.
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return False
|
||||
|
||||
return self.spam_checker.check_event_for_spam(event)
|
||||
|
||||
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
|
||||
def user_may_invite(
|
||||
self, inviter_userid: str, invitee_userid: str, room_id: str
|
||||
) -> bool:
|
||||
"""Checks if a given user may send an invite
|
||||
|
||||
If this method returns false, the invite will be rejected.
|
||||
|
||||
Args:
|
||||
userid (string): The sender's user ID
|
||||
inviter_userid: The user ID of the sender of the invitation
|
||||
invitee_userid: The user ID targeted in the invitation
|
||||
room_id: The room ID
|
||||
|
||||
Returns:
|
||||
bool: True if the user may send an invite, otherwise False
|
||||
True if the user may send an invite, otherwise False
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return True
|
||||
@@ -76,52 +85,78 @@ class SpamChecker(object):
|
||||
inviter_userid, invitee_userid, room_id
|
||||
)
|
||||
|
||||
def user_may_create_room(self, userid):
|
||||
def user_may_create_room(self, userid: str) -> bool:
|
||||
"""Checks if a given user may create a room
|
||||
|
||||
If this method returns false, the creation request will be rejected.
|
||||
|
||||
Args:
|
||||
userid (string): The sender's user ID
|
||||
userid: The ID of the user attempting to create a room
|
||||
|
||||
Returns:
|
||||
bool: True if the user may create a room, otherwise False
|
||||
True if the user may create a room, otherwise False
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return True
|
||||
|
||||
return self.spam_checker.user_may_create_room(userid)
|
||||
|
||||
def user_may_create_room_alias(self, userid, room_alias):
|
||||
def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
|
||||
"""Checks if a given user may create a room alias
|
||||
|
||||
If this method returns false, the association request will be rejected.
|
||||
|
||||
Args:
|
||||
userid (string): The sender's user ID
|
||||
room_alias (string): The alias to be created
|
||||
userid: The ID of the user attempting to create a room alias
|
||||
room_alias: The alias to be created
|
||||
|
||||
Returns:
|
||||
bool: True if the user may create a room alias, otherwise False
|
||||
True if the user may create a room alias, otherwise False
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return True
|
||||
|
||||
return self.spam_checker.user_may_create_room_alias(userid, room_alias)
|
||||
|
||||
def user_may_publish_room(self, userid, room_id):
|
||||
def user_may_publish_room(self, userid: str, room_id: str) -> bool:
|
||||
"""Checks if a given user may publish a room to the directory
|
||||
|
||||
If this method returns false, the publish request will be rejected.
|
||||
|
||||
Args:
|
||||
userid (string): The sender's user ID
|
||||
room_id (string): The ID of the room that would be published
|
||||
userid: The user ID attempting to publish the room
|
||||
room_id: The ID of the room that would be published
|
||||
|
||||
Returns:
|
||||
bool: True if the user may publish the room, otherwise False
|
||||
True if the user may publish the room, otherwise False
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return True
|
||||
|
||||
return self.spam_checker.user_may_publish_room(userid, room_id)
|
||||
|
||||
def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool:
|
||||
"""Checks if a user ID or display name are considered "spammy" by this server.
|
||||
|
||||
If the server considers a username spammy, then it will not be included in
|
||||
user directory results.
|
||||
|
||||
Args:
|
||||
user_profile: The user information to check, it contains the keys:
|
||||
* user_id
|
||||
* display_name
|
||||
* avatar_url
|
||||
|
||||
Returns:
|
||||
True if the user is spammy.
|
||||
"""
|
||||
if self.spam_checker is None:
|
||||
return False
|
||||
|
||||
# For backwards compatibility, if the method does not exist on the spam checker, fallback to not interfering.
|
||||
checker = getattr(self.spam_checker, "check_username_for_spam", None)
|
||||
if not checker:
|
||||
return False
|
||||
# Make a copy of the user profile object to ensure the spam checker
|
||||
# cannot modify it.
|
||||
return checker(user_profile.copy())
|
||||
|
||||
@@ -74,15 +74,16 @@ class ThirdPartyEventRules(object):
|
||||
is_requester_admin (bool): If the requester is an admin
|
||||
|
||||
Returns:
|
||||
defer.Deferred
|
||||
defer.Deferred[bool]: Whether room creation is allowed or denied.
|
||||
"""
|
||||
|
||||
if self.third_party_rules is None:
|
||||
return
|
||||
return True
|
||||
|
||||
yield self.third_party_rules.on_create_room(
|
||||
ret = yield self.third_party_rules.on_create_room(
|
||||
requester, config, is_requester_admin
|
||||
)
|
||||
return ret
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_threepid_can_be_invited(self, medium, address, room_id):
|
||||
|
||||
@@ -12,8 +12,9 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import collections
|
||||
import re
|
||||
from typing import Mapping, Union
|
||||
|
||||
from six import string_types
|
||||
|
||||
@@ -422,3 +423,37 @@ class EventClientSerializer(object):
|
||||
return yieldable_gather_results(
|
||||
self.serialize_event, events, time_now=time_now, **kwargs
|
||||
)
|
||||
|
||||
|
||||
def copy_power_levels_contents(
|
||||
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
|
||||
):
|
||||
"""Copy the content of a power_levels event, unfreezing frozendicts along the way
|
||||
|
||||
Raises:
|
||||
TypeError if the input does not look like a valid power levels event content
|
||||
"""
|
||||
if not isinstance(old_power_levels, collections.Mapping):
|
||||
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
|
||||
|
||||
power_levels = {}
|
||||
for k, v in old_power_levels.items():
|
||||
|
||||
if isinstance(v, int):
|
||||
power_levels[k] = v
|
||||
continue
|
||||
|
||||
if isinstance(v, collections.Mapping):
|
||||
power_levels[k] = h = {}
|
||||
for k1, v1 in v.items():
|
||||
# we should only have one level of nesting
|
||||
if not isinstance(v1, int):
|
||||
raise TypeError(
|
||||
"Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
|
||||
)
|
||||
h[k1] = v1
|
||||
continue
|
||||
|
||||
raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
|
||||
|
||||
return power_levels

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    RoomVersion,
)
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import event_type_from_format_version
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -33,7 +38,7 @@ from synapse.logging.context import (
    make_deferred_yieldable,
    preserve_fn,
)
from synapse.types import get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError

logger = logging.getLogger(__name__)
@@ -342,16 +347,15 @@ def _is_invite_via_3pid(event):
    )


def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
    """Construct a FrozenEvent from an event json received over federation
def event_from_pdu_json(
    pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
) -> EventBase:
    """Construct an EventBase from an event json received over federation

    Args:
        pdu_json (object): pdu as received over federation
        event_format_version (int): The event format version
        outlier (bool): True to mark this event as an outlier

    Returns:
        FrozenEvent
        pdu_json: pdu as received over federation
        room_version: The version of the room this event belongs to
        outlier: True to mark this event as an outlier

    Raises:
        SynapseError: if the pdu is missing required fields or is otherwise
@@ -370,8 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
    elif depth > MAX_DEPTH:
        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

    event = event_type_from_format_version(event_format_version)(pdu_json)

    event = make_event_from_dict(pdu_json, room_version)
    event.internal_metadata.outlier = outlier

    return event
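The signature change above ripples through every caller in this changeset: instead of resolving a room-version string to an event format version, callers now pass the RoomVersion object through. A hedged sketch of the call-site pattern (the store and room_version_id names are illustrative):

    # old call sites: resolve an event format version, then build the event
    format_ver = room_version_to_event_format(room_version_id)
    event = event_from_pdu_json(pdu_json, format_ver, outlier=True)

    # new call sites: fetch the RoomVersion and pass it straight through
    room_version = await store.get_room_version(room_id)
    event = event_from_pdu_json(pdu_json, room_version, outlier=True)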

@@ -17,6 +17,18 @@
import copy
import itertools
import logging
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
)

from prometheus_client import Counter

@@ -29,16 +41,19 @@ from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    RoomVersion,
    RoomVersions,
)
from synapse.events import builder, room_version_to_event_format
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
@@ -50,6 +65,8 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t

PDU_RETRY_TIME_MS = 1 * 60 * 1000

T = TypeVar("T")


class InvalidResponseError(RuntimeError):
    """Helper for _try_destination_list: indicates that the server returned a response
@@ -168,21 +185,17 @@ class FederationClient(FederationBase):
        sent_queries_counter.labels("client_one_time_keys").inc()
        return self.transport_layer.claim_client_keys(destination, content, timeout)

    @defer.inlineCallbacks
    @log_function
    def backfill(self, dest, room_id, limit, extremities):
        """Requests some more historic PDUs for the given context from the
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
    ) -> List[EventBase]:
        """Requests some more historic PDUs for the given room from the
        given destination server.

        Args:
            dest (str): The remote homeserver to ask.
            room_id (str): The room_id to backfill.
            limit (int): The maximum number of PDUs to return.
            extremities (list): List of PDU id and origins of the first pdus
                we have seen from the context

        Returns:
            Deferred: Results in the received PDUs.
            limit (int): The maximum number of events to return.
            extremities (list): our current backwards extremities, to backfill from
        """
        logger.debug("backfill extrem=%s", extremities)

@@ -190,34 +203,37 @@ class FederationClient(FederationBase):
        if not extremities:
            return

        transaction_data = yield self.transport_layer.backfill(
        transaction_data = await self.transport_layer.backfill(
            dest, room_id, extremities, limit
        )

        logger.debug("backfill transaction_data=%r", transaction_data)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        room_version = await self.store.get_room_version(room_id)

        pdus = [
            event_from_pdu_json(p, format_ver, outlier=False)
            event_from_pdu_json(p, room_version, outlier=False)
            for p in transaction_data["pdus"]
        ]

        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = yield make_deferred_yieldable(
        pdus[:] = await make_deferred_yieldable(
            defer.gatherResults(
                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
                self._check_sigs_and_hashes(room_version.identifier, pdus),
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        return pdus
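backfill shows the mechanical conversion applied throughout this changeset: drop @defer.inlineCallbacks, declare the function async def, and turn each yield of a Deferred into an await (Twisted Deferreds are awaitable). Callers that still speak Deferred can wrap the coroutine at the boundary; a sketch with illustrative arguments:

    from twisted.internet.defer import ensureDeferred

    # hand the coroutine back to Deferred-based code
    d = ensureDeferred(
        client.backfill(
            "remote.example.com", "!room:example.com", limit=10, extremities=["$extrem1"]
        )
    )
    d.addCallback(lambda pdus: print("got %d PDUs" % len(pdus)))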

    @defer.inlineCallbacks
    @log_function
    def get_pdu(
        self, destinations, event_id, room_version, outlier=False, timeout=None
    ):
    async def get_pdu(
        self,
        destinations: Iterable[str],
        event_id: str,
        room_version: RoomVersion,
        outlier: bool = False,
        timeout: Optional[int] = None,
    ) -> Optional[EventBase]:
        """Requests the PDU with given origin and ID from the remote home
        servers.

@@ -225,18 +241,17 @@ class FederationClient(FederationBase):
        one succeeds.

        Args:
            destinations (list): Which homeservers to query
            event_id (str): event to fetch
            room_version (str): version of the room
            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
            destinations: Which homeservers to query
            event_id: event to fetch
            room_version: version of the room
            outlier: Indicates whether the PDU is an `outlier`, i.e. if
                it's from an arbitary point in the context as opposed to part
                of the current block of PDUs. Defaults to `False`
            timeout (int): How long to try (in ms) each destination for before
            timeout: How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            Deferred: Results in the requested PDU, or None if we were unable to find
                it.
            The requested PDU, or None if we were unable to find it.
        """

        # TODO: Rate limit the number of times we try and get the same event.
@@ -247,8 +262,6 @@ class FederationClient(FederationBase):

        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

        format_ver = room_version_to_event_format(room_version)

        signed_pdu = None
        for destination in destinations:
            now = self._clock.time_msec()
@@ -257,7 +270,7 @@ class FederationClient(FederationBase):
                continue

            try:
                transaction_data = yield self.transport_layer.get_event(
                transaction_data = await self.transport_layer.get_event(
                    destination, event_id, timeout=timeout
                )

@@ -269,7 +282,7 @@ class FederationClient(FederationBase):
                )

                pdu_list = [
                    event_from_pdu_json(p, format_ver, outlier=outlier)
                    event_from_pdu_json(p, room_version, outlier=outlier)
                    for p in transaction_data["pdus"]
                ]

@@ -277,7 +290,9 @@ class FederationClient(FederationBase):
                    pdu = pdu_list[0]

                    # Check signatures are correct.
                    signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
                    signed_pdu = await self._check_sigs_and_hash(
                        room_version.identifier, pdu
                    )

                    break

@@ -307,15 +322,16 @@ class FederationClient(FederationBase):

        return signed_pdu
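Note that get_pdu now takes the whole RoomVersion object rather than its string identifier, dropping to room_version.identifier only where the signature-checking helpers still speak strings. For orientation, a simplified sketch of what synapse.api.room_versions.RoomVersion roughly looks like (the real attrs class carries more fields):

    import attr

    @attr.s(slots=True, frozen=True)
    class RoomVersion:
        identifier = attr.ib()    # str, e.g. "5"; what appears on the wire
        disposition = attr.ib()   # "stable" or "unstable"
        event_format = attr.ib()  # an EventFormatVersions value
        state_res = attr.ib()     # a StateResolutionVersions value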

    @defer.inlineCallbacks
    def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
    async def get_room_state_ids(
        self, destination: str, room_id: str, event_id: str
    ) -> Tuple[List[str], List[str]]:
        """Calls the /state_ids endpoint to fetch the state at a particular point
        in the room, and the auth events for the given event

        Returns:
            Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
            a tuple of (state event_ids, auth event_ids)
        """
        result = yield self.transport_layer.get_room_state_ids(
        result = await self.transport_layer.get_room_state_ids(
            destination, room_id, event_id=event_id
        )

@@ -329,37 +345,39 @@ class FederationClient(FederationBase):

        return state_event_ids, auth_event_ids

    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, room_id, event_id):
        res = yield self.transport_layer.get_event_auth(destination, room_id, event_id)
    async def get_event_auth(self, destination, room_id, event_id):
        res = await self.transport_layer.get_event_auth(destination, room_id, event_id)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        room_version = await self.store.get_room_version(room_id)

        auth_chain = [
            event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"]
            event_from_pdu_json(p, room_version, outlier=True)
            for p in res["auth_chain"]
        ]

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True, room_version=room_version
        signed_auth = await self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True, room_version=room_version.identifier
        )

        signed_auth.sort(key=lambda e: e.depth)

        return signed_auth

    @defer.inlineCallbacks
    def _try_destination_list(self, description, destinations, callback):
    async def _try_destination_list(
        self,
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
    ) -> T:
        """Try an operation on a series of servers, until it succeeds

        Args:
            description (unicode): description of the operation we're doing, for logging
            description: description of the operation we're doing, for logging

            destinations (Iterable[unicode]): list of server_names to try
            destinations: list of server_names to try

            callback (callable): Function to run for each server. Passed a single
                argument: the server_name to try. May return a deferred.
            callback: Function to run for each server. Passed a single
                argument: the server_name to try.

            If the callback raises a CodeMessageException with a 300/400 code,
            attempts to perform the operation stop immediately and the exception is
@@ -370,7 +388,7 @@ class FederationClient(FederationBase):
            suppressed if the exception is an InvalidResponseError.

        Returns:
            The [Deferred] result of callback, if it succeeds
            The result of callback, if it succeeds

        Raises:
            SynapseError if the chosen remote server returns a 300/400 code, or
@@ -381,10 +399,12 @@ class FederationClient(FederationBase):
                continue

            try:
                res = yield callback(destination)
                res = await callback(destination)
                return res
            except InvalidResponseError as e:
                logger.warning("Failed to %s via %s: %s", description, destination, e)
            except UnsupportedRoomVersionError:
                raise
            except HttpResponseException as e:
                if not 500 <= e.code < 600:
                    raise e.to_synapse_error()
@@ -398,14 +418,20 @@ class FederationClient(FederationBase):
                )
            except Exception:
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=1
                    "Failed to %s via %s", description, destination, exc_info=True
                )

        raise SynapseError(502, "Failed to %s via any server" % (description,))
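A hypothetical caller, to show the shape _try_destination_list now expects: the callback is a plain async function and its result comes back directly rather than via a Deferred (the get_thing transport method is invented for the example):

    async def fetch_thing(self, destinations: Iterable[str]) -> JsonDict:
        async def send_request(destination: str) -> JsonDict:
            # raising InvalidResponseError moves on to the next server, while a
            # 300/400 HttpResponseException aborts the whole loop
            return await self.transport_layer.get_thing(destination)

        return await self._try_destination_list("fetch thing", destinations, send_request)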

    def make_membership_event(
        self, destinations, room_id, user_id, membership, content, params
    ):
    async def make_membership_event(
        self,
        destinations: Iterable[str],
        room_id: str,
        user_id: str,
        membership: str,
        content: dict,
        params: Dict[str, str],
    ) -> Tuple[str, EventBase, RoomVersion]:
        """
        Creates an m.room.member event, with context, without participating in the room.

@@ -417,26 +443,28 @@ class FederationClient(FederationBase):
        Note that this does not append any events to any graphs.

        Args:
            destinations (Iterable[str]): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            room_id (str): The room in which the event will happen.
            user_id (str): The user whose membership is being evented.
            membership (str): The "membership" property of the event. Must be
                one of "join" or "leave".
            content (dict): Any additional data to put into the content field
                of the event.
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.
        Return:
            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
                `(origin, event, event_format)` where origin is the remote
                homeserver which generated the event, and event_format is one of
                `synapse.api.room_versions.EventFormatVersions`.
            room_id: The room in which the event will happen.
            user_id: The user whose membership is being evented.
            membership: The "membership" property of the event. Must be one of
                "join" or "leave".
            content: Any additional data to put into the content field of the
                event.
            params: Query parameters to include in the request.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.
        Returns:
            `(origin, event, room_version)` where origin is the remote
            homeserver which generated the event, and room_version is the
            version of the room.

            Fails with a ``RuntimeError`` if no servers were reachable.
        Raises:
            UnsupportedRoomVersionError: if remote responds with
                a room version we don't understand.

            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:
@@ -445,16 +473,17 @@ class FederationClient(FederationBase):
                % (membership, ",".join(valid_memberships))
            )

        @defer.inlineCallbacks
        def send_request(destination):
            ret = yield self.transport_layer.make_membership_event(
        async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
            ret = await self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params
            )

            # Note: If not supplied, the room version may be either v1 or v2,
            # however either way the event format version will be v1.
            room_version = ret.get("room_version", RoomVersions.V1.identifier)
            event_format = room_version_to_event_format(room_version)
            room_version_id = ret.get("room_version", RoomVersions.V1.identifier)
            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version:
                raise UnsupportedRoomVersionError()

            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
@@ -474,92 +503,87 @@ class FederationClient(FederationBase):
                self._clock,
                self.hostname,
                self.signing_key,
                format_version=event_format,
                room_version=room_version,
                event_dict=pdu_dict,
            )

            return (destination, ev, event_format)
            return destination, ev, room_version

        return self._try_destination_list(
        return await self._try_destination_list(
            "make_" + membership, destinations, send_request
        )

    def send_join(self, destinations, pdu, event_format_version):
    async def send_join(
        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
    ) -> Dict[str, Any]:
        """Sends a join event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
        and send the event out to the rest of the federation.

        Args:
            destinations (str): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu (BaseEvent): event to be sent
            event_format_version (int): The event format version
            pdu: event to be sent
            room_version: the version of the room (according to the server that
                did the make_join)

        Return:
            Deferred: resolves to a dict with members ``origin`` (a string
                giving the serer the event was sent to, ``state`` (?) and
        Returns:
            a dict with members ``origin`` (a string
                giving the server the event was sent to, ``state`` (?) and
                ``auth_chain``.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.
        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
            RuntimeError: if no servers were reachable.
        """

        def check_authchain_validity(signed_auth_chain):
            for e in signed_auth_chain:
                if e.type == EventTypes.Create:
                    create_event = e
                    break
            else:
                raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,))

            # the room version should be sane.
            room_version = create_event.content.get("room_version", "1")
            if room_version not in KNOWN_ROOM_VERSIONS:
                # This shouldn't be possible, because the remote server should have
                # rejected the join attempt during make_join.
                raise InvalidResponseError(
                    "room appears to have unsupported version %s" % (room_version,)
                )

        @defer.inlineCallbacks
        def send_request(destination):
            content = yield self._do_send_join(destination, pdu)
        async def send_request(destination) -> Dict[str, Any]:
            content = await self._do_send_join(destination, pdu)

            logger.debug("Got content: %s", content)

            state = [
                event_from_pdu_json(p, event_format_version, outlier=True)
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("state", [])
            ]

            auth_chain = [
                event_from_pdu_json(p, event_format_version, outlier=True)
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("auth_chain", [])
            ]

            pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}

            room_version = None
            create_event = None
            for e in state:
                if (e.type, e.state_key) == (EventTypes.Create, ""):
                    room_version = e.content.get(
                        "room_version", RoomVersions.V1.identifier
                    )
                    create_event = e
                    break

            if room_version is None:
            if create_event is None:
                # If the state doesn't have a create event then the room is
                # invalid, and it would fail auth checks anyway.
                raise SynapseError(400, "No create event in state")

            valid_pdus = yield self._check_sigs_and_hash_and_fetch(
            # the room version should be sane.
            create_room_version = create_event.content.get(
                "room_version", RoomVersions.V1.identifier
            )
            if create_room_version != room_version.identifier:
                # either the server that fulfilled the make_join, or the server that is
                # handling the send_join, is lying.
                raise InvalidResponseError(
                    "Unexpected room version %s in create event"
                    % (create_room_version,)
                )

            valid_pdus = await self._check_sigs_and_hash_and_fetch(
                destination,
                list(pdus.values()),
                outlier=True,
                room_version=room_version,
                room_version=room_version.identifier,
            )

            valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -583,7 +607,17 @@ class FederationClient(FederationBase):
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

            check_authchain_validity(signed_auth)
            # double-check that the same create event has ended up in the auth chain
            auth_chain_create_events = [
                e.event_id
                for e in signed_auth
                if (e.type, e.state_key) == (EventTypes.Create, "")
            ]
            if auth_chain_create_events != [create_event.event_id]:
                raise InvalidResponseError(
                    "Unexpected create event(s) in auth chain"
                    % (auth_chain_create_events,)
                )

            return {
                "state": signed_state,
@@ -591,14 +625,13 @@ class FederationClient(FederationBase):
                "origin": destination,
            }

        return self._try_destination_list("send_join", destinations, send_request)
        return await self._try_destination_list("send_join", destinations, send_request)

    @defer.inlineCallbacks
    def _do_send_join(self, destination, pdu):
    async def _do_send_join(self, destination: str, pdu: EventBase):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_join_v2(
            content = await self.transport_layer.send_join_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
@@ -620,7 +653,7 @@ class FederationClient(FederationBase):

        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_join_v1(
        resp = await self.transport_layer.send_join_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -631,51 +664,45 @@ class FederationClient(FederationBase):
        # content.
        return resp[1]

    @defer.inlineCallbacks
    def send_invite(self, destination, room_id, event_id, pdu):
        room_version = yield self.store.get_room_version(room_id)
    async def send_invite(
        self, destination: str, room_id: str, event_id: str, pdu: EventBase,
    ) -> EventBase:
        room_version = await self.store.get_room_version(room_id)

        content = yield self._do_send_invite(destination, pdu, room_version)
        content = await self._do_send_invite(destination, pdu, room_version)

        pdu_dict = content["event"]

        logger.debug("Got response to send_invite: %s", pdu_dict)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(pdu_dict, format_ver)
        pdu = event_from_pdu_json(pdu_dict, room_version)

        # Check signatures are correct.
        pdu = yield self._check_sigs_and_hash(room_version, pdu)
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)

        # FIXME: We should handle signature failures more gracefully.

        return pdu

    @defer.inlineCallbacks
    def _do_send_invite(self, destination, pdu, room_version):
    async def _do_send_invite(
        self, destination: str, pdu: EventBase, room_version: RoomVersion
    ) -> JsonDict:
        """Actually sends the invite, first trying v2 API and falling back to
        v1 API if necessary.

        Args:
            destination (str): Target server
            pdu (FrozenEvent)
            room_version (str)

        Returns:
            dict: The event as a dict as returned by the remote server
            The event as a dict as returned by the remote server
        """
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_invite_v2(
            content = await self.transport_layer.send_invite_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content={
                    "event": pdu.get_pdu_json(time_now),
                    "room_version": room_version,
                    "room_version": room_version.identifier,
                    "invite_room_state": pdu.unsigned.get("invite_room_state", []),
                },
            )
@@ -693,8 +720,7 @@ class FederationClient(FederationBase):
            # Otherwise, we assume that the remote server doesn't understand
            # the v2 invite API. That's ok provided the room uses old-style event
            # IDs.
            v = KNOWN_ROOM_VERSIONS.get(room_version)
            if v.event_format != EventFormatVersions.V1:
            if room_version.event_format != EventFormatVersions.V1:
                raise SynapseError(
                    400,
                    "User's homeserver does not support this room version",
@@ -708,7 +734,7 @@ class FederationClient(FederationBase):
        # Didn't work, try v1 API.
        # Note the v1 API returns a tuple of `(200, content)`

        _, content = yield self.transport_layer.send_invite_v1(
        _, content = await self.transport_layer.send_invite_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -716,7 +742,7 @@ class FederationClient(FederationBase):
        )
        return content
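_do_send_invite, like _do_send_join and _do_send_leave below, follows the same version-negotiation skeleton: try the v2 endpoint, and on a 400/404 assume the remote predates it and retry via v1. The skeleton, sketched with invented endpoint names:

    async def do_send_thing(transport, destination: str, body: dict) -> dict:
        try:
            # hypothetical v2/v1 endpoints, standing in for send_invite_v2/v1 etc.
            return await transport.send_thing_v2(destination, body)
        except HttpResponseException as e:
            if e.code not in (400, 404):
                raise
            # remote likely doesn't know the v2 API; fall through to v1

        # note: the v1 endpoints return a (200, content) tuple, not bare content
        _, content = await transport.send_thing_v1(destination, body)
        return content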

    def send_leave(self, destinations, pdu):
    async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
        """Sends a leave event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
@@ -725,34 +751,29 @@ class FederationClient(FederationBase):
        This is mostly useful to reject received invites.

        Args:
            destinations (str): Candidate homeservers which are probably
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu (BaseEvent): event to be sent
            pdu: event to be sent

        Return:
            Deferred: resolves to None.
        Raises:
            SynapseError if the chosen remote server returns a 300/400 code.

            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
            RuntimeError if no servers were reachable.
        """

        @defer.inlineCallbacks
        def send_request(destination):
            content = yield self._do_send_leave(destination, pdu)

        async def send_request(destination: str) -> None:
            content = await self._do_send_leave(destination, pdu)
            logger.debug("Got content: %s", content)
            return None

        return self._try_destination_list("send_leave", destinations, send_request)
        return await self._try_destination_list(
            "send_leave", destinations, send_request
        )

    @defer.inlineCallbacks
    def _do_send_leave(self, destination, pdu):
    async def _do_send_leave(self, destination, pdu):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_leave_v2(
            content = await self.transport_layer.send_leave_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
@@ -774,7 +795,7 @@ class FederationClient(FederationBase):

        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_leave_v1(
        resp = await self.transport_layer.send_leave_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
@@ -806,34 +827,33 @@ class FederationClient(FederationBase):
            third_party_instance_id=third_party_instance_id,
        )

    @defer.inlineCallbacks
    def get_missing_events(
    async def get_missing_events(
        self,
        destination,
        room_id,
        earliest_events_ids,
        latest_events,
        limit,
        min_depth,
        timeout,
    ):
        destination: str,
        room_id: str,
        earliest_events_ids: Sequence[str],
        latest_events: Iterable[EventBase],
        limit: int,
        min_depth: int,
        timeout: int,
    ) -> List[EventBase]:
        """Tries to fetch events we are missing. This is called when we receive
        an event without having received all of its ancestors.

        Args:
            destination (str)
            room_id (str)
            earliest_events_ids (list): List of event ids. Effectively the
            destination
            room_id
            earliest_events_ids: List of event ids. Effectively the
                events we expected to receive, but haven't. `get_missing_events`
                should only return events that didn't happen before these.
            latest_events (list): List of events we have received that we don't
            latest_events: List of events we have received that we don't
                have all previous events for.
            limit (int): Maximum number of events to return.
            min_depth (int): Minimum depth of events tor return.
            timeout (int): Max time to wait in ms
            limit: Maximum number of events to return.
            min_depth: Minimum depth of events to return.
            timeout: Max time to wait in ms
        """
        try:
            content = yield self.transport_layer.get_missing_events(
            content = await self.transport_layer.get_missing_events(
                destination=destination,
                room_id=room_id,
                earliest_events=earliest_events_ids,
@@ -843,15 +863,14 @@ class FederationClient(FederationBase):
                timeout=timeout,
            )

            room_version = yield self.store.get_room_version(room_id)
            format_ver = room_version_to_event_format(room_version)
            room_version = await self.store.get_room_version(room_id)

            events = [
                event_from_pdu_json(e, format_ver) for e in content.get("events", [])
                event_from_pdu_json(e, room_version) for e in content.get("events", [])
            ]

            signed_events = yield self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False, room_version=room_version
            signed_events = await self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False, room_version=room_version.identifier
            )
        except HttpResponseException as e:
            if not e.code == 400:

@@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict

import six
from six import iteritems
@@ -22,6 +23,7 @@ from six import iteritems
from canonicaljson import json
from prometheus_client import Counter

from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

@@ -36,20 +38,23 @@ from synapse.api.errors import (
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
from synapse.logging.context import (
    make_deferred_yieldable,
    nested_logging_context,
    run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.types import get_domain_from_id
from synapse.util import glob_to_regex
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache

@@ -76,6 +81,8 @@ class FederationServer(FederationBase):
        self.handler = hs.get_handlers().federation_handler
        self.state = hs.get_state_handler()

        self.device_handler = hs.get_device_handler()

        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

@@ -160,6 +167,43 @@ class FederationServer(FederationBase):
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response
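The PDU/EDU fan-out above is the standard Synapse idiom for running two coroutines concurrently under Twisted while keeping log contexts straight. Reduced to a reusable sketch:

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background
    from synapse.util import unwrapFirstError

    async def run_both(coro_fn_a, coro_fn_b):
        # run_in_background starts each task in its own log context;
        # make_deferred_yieldable restores ours while we wait for the results
        a, b = await make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(coro_fn_a), run_in_background(coro_fn_b)],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)  # re-raise the first underlying failure
        )
        return a, b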

    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)
@@ -195,20 +239,13 @@ class FederationServer(FederationBase):
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            try:
                format_ver = room_version_to_event_format(room_version)
            except UnsupportedRoomVersionError:
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info(
                    "Ignoring PDU for room %s with unknown version %s",
                    room_id,
                    room_version,
                )
                logger.info("Ignoring PDU: %s", e)
                continue

            event = event_from_pdu_json(p, format_ver)
            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}
@@ -250,20 +287,28 @@ class FederationServer(FederationBase):
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                await self.received_edu(origin, edu.edu_type, edu.content)
        return pdu_results

        response = {"pdus": pdu_results}
    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction.
        """

        logger.debug("Returning: %s", str(response))
        async def _process_edu(edu_dict):
            received_edus_counter.inc()

        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response
            edu = Edu(
                origin=origin,
                destination=self.server_name,
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)

    async def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()
        await self.registry.on_edu(edu_type, origin, content)
        await concurrently_execute(
            _process_edu,
            getattr(transaction, "edus", []),
            TRANSACTION_CONCURRENCY_LIMIT,
        )
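concurrently_execute(func, args, limit) runs func over args with at most limit calls in flight at once, which is what bounds EDU processing above. Its semantics, approximated in plain asyncio for illustration (the real helper in synapse.util.async_helpers is Deferred-based):

    import asyncio

    async def concurrently_execute_sketch(func, args, limit):
        it = iter(args)

        async def control_loop():
            # each loop pulls the next piece of work off the shared iterator
            for arg in it:
                await func(arg)

        await asyncio.gather(*(control_loop() for _ in range(limit)))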

    async def on_context_state_request(self, origin, room_id, event_id):
        origin_host, _ = parse_server_name(origin)
@@ -288,7 +333,7 @@ class FederationServer(FederationBase):
                )
            )

        room_version = await self.store.get_room_version(room_id)
        room_version = await self.store.get_room_version_id(room_id)
        resp["room_version"] = room_version

        return 200, resp
@@ -339,7 +384,7 @@ class FederationServer(FederationBase):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        room_version = await self.store.get_room_version(room_id)
        room_version = await self.store.get_room_version_id(room_id)
        if room_version not in supported_versions:
            logger.warning(
                "Room version %s not in %s", room_version, supported_versions
@@ -350,21 +395,22 @@ class FederationServer(FederationBase):
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_invite_request(self, origin, content, room_version):
        if room_version not in KNOWN_ROOM_VERSIONS:
    async def on_invite_request(
        self, origin: str, content: JsonDict, room_version_id: str
    ):
        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if not room_version:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(content, format_ver)
        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        ret_pdu = await self.handler.on_invite_request(origin, pdu)
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}

@@ -372,15 +418,14 @@ class FederationServer(FederationBase):
        logger.debug("on_send_join_request: content: %s", content)

        room_version = await self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        pdu = event_from_pdu_json(content, format_ver)
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)

        res_pdus = await self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
@@ -394,7 +439,7 @@ class FederationServer(FederationBase):
        await self.check_server_matches_acl(origin_host, room_id)
        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

        room_version = await self.store.get_room_version(room_id)
        room_version = await self.store.get_room_version_id(room_id)

        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
@@ -403,15 +448,14 @@ class FederationServer(FederationBase):
        logger.debug("on_send_leave_request: content: %s", content)

        room_version = await self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        pdu = event_from_pdu_json(content, format_ver)
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)
        pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)

        await self.handler.on_send_leave_request(origin, pdu)
        return {}
@@ -450,14 +494,13 @@ class FederationServer(FederationBase):
        await self.check_server_matches_acl(origin_host, room_id)

        room_version = await self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)

        auth_chain = [
            event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
            event_from_pdu_json(e, room_version) for e in content["auth_chain"]
        ]

        signed_auth = await self._check_sigs_and_hash_and_fetch(
            origin, auth_chain, outlier=True, room_version=room_version
            origin, auth_chain, outlier=True, room_version=room_version.identifier
        )

        ret = await self.handler.on_query_auth(
@@ -482,8 +525,9 @@ class FederationServer(FederationBase):
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    def on_query_user_devices(self, origin, user_id):
        return self.on_query_request("user_devices", user_id)
    async def on_query_user_devices(self, origin: str, user_id: str):
        keys = await self.device_handler.on_federation_query_user_devices(user_id)
        return 200, keys

    @trace
    async def on_claim_client_keys(self, origin, content):
@@ -524,7 +568,7 @@ class FederationServer(FederationBase):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        logger.info(
        logger.debug(
            "on_get_missing_events: earliest_events: %r, latest_events: %r,"
            " limit: %d",
            earliest_events,
@@ -537,11 +581,11 @@ class FederationServer(FederationBase):
        )

        if len(missing_events) < 5:
            logger.info(
            logger.debug(
                "Returning %d events: %r", len(missing_events), missing_events
            )
        else:
            logger.info("Returning %d events", len(missing_events))
            logger.debug("Returning %d events", len(missing_events))

        time_now = self._clock.time_msec()

@@ -618,7 +662,7 @@ class FederationServer(FederationBase):
        logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)
        room_version = await self.store.get_room_version_id(pdu.room_id)

        # Check signature.
        try:

@@ -69,8 +69,6 @@ class FederationRemoteSendQueue(object):

        self.edus = SortedDict()  # stream position -> Edu

        self.device_messages = SortedDict()  # stream position -> destination

        self.pos = 1
        self.pos_time = SortedDict()

@@ -92,7 +90,6 @@ class FederationRemoteSendQueue(object):
            "keyed_edu",
            "keyed_edu_changed",
            "edus",
            "device_messages",
            "pos_time",
            "presence_destinations",
        ]:
@@ -171,12 +168,6 @@ class FederationRemoteSendQueue(object):
        for key in keys[:i]:
            del self.edus[key]

        # Delete things out of device map
        keys = self.device_messages.keys()
        i = self.device_messages.bisect_left(position_to_delete)
        for key in keys[:i]:
            del self.device_messages[key]

    def notify_new_events(self, current_id):
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
@@ -249,9 +240,8 @@ class FederationRemoteSendQueue(object):

    def send_device_messages(self, destination):
        """As per FederationSender"""
        pos = self._next_pos()
        self.device_messages[pos] = destination
        self.notifier.on_new_replication_data()
        # We don't need to replicate this as it gets sent down a different
        # stream.

    def get_current_token(self):
        return self.pos - 1
@@ -259,7 +249,9 @@ class FederationRemoteSendQueue(object):
    def federation_ack(self, token):
        self._clear_queue_before_pos(token)

    def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
    async def get_replication_rows(
        self, from_token, to_token, limit, federation_ack=None
    ):
        """Get rows to be sent over federation between the two tokens

        Args:
@@ -337,14 +329,6 @@ class FederationRemoteSendQueue(object):
        for (pos, edu) in edus:
            rows.append((pos, EduRow(edu)))

        # Fetch changed device messages
        i = self.device_messages.bisect_right(from_token)
        j = self.device_messages.bisect_right(to_token) + 1
        device_messages = {v: k for k, v in self.device_messages.items()[i:j]}

        for (destination, pos) in iteritems(device_messages):
            rows.append((pos, DeviceRow(destination=destination)))

        # Sort rows based on pos
        rows.sort()

@@ -470,28 +454,9 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
        buff.edus.setdefault(self.edu.destination, []).append(self.edu)


class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", ("destination",))):  # str
    """Streams the fact that either a) there is pending to device messages for
    users on the remote, or b) a local users device has changed and needs to
    be sent to the remote.
    """

    TypeId = "d"

    @staticmethod
    def from_data(data):
        return DeviceRow(destination=data["destination"])

    def to_data(self):
        return {"destination": self.destination}

    def add_to_buffer(self, buff):
        buff.device_destinations.add(self.destination)


TypeToRow = {
    Row.TypeId: Row
    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow, DeviceRow)
    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow,)
}


@@ -502,7 +467,6 @@ ParsedFederationStreamData = namedtuple(
        "presence_destinations",  # list of tuples of UserPresenceState and destinations
        "keyed_edus",  # dict of destination -> { key -> Edu }
        "edus",  # dict of destination -> [Edu]
        "device_destinations",  # set of destinations
    ),
)

@@ -521,11 +485,7 @@ def process_rows_for_federation(transaction_queue, rows):
    # them into the appropriate collection and then send them off.

    buff = ParsedFederationStreamData(
        presence=[],
        presence_destinations=[],
        keyed_edus={},
        edus={},
        device_destinations=set(),
        presence=[], presence_destinations=[], keyed_edus={}, edus={},
    )

    # Parse the rows in the stream and add to the buffer
@@ -553,6 +513,3 @@ def process_rows_for_federation(transaction_queue, rows):
    for destination, edu_list in iteritems(buff.edus):
        for edu in edu_list:
            transaction_queue.send_edu(edu, None)

    for destination in buff.device_destinations:
        transaction_queue.send_device_messages(destination)

@@ -14,6 +14,7 @@
# limitations under the License.

import logging
from typing import Dict, Hashable, Iterable, List, Optional, Set

from six import itervalues

@@ -21,7 +22,9 @@ from prometheus_client import Counter

from twisted.internet import defer

import synapse
import synapse.metrics
from synapse.events import EventBase
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
@@ -38,6 +41,8 @@ from synapse.metrics import (
    events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import ReadReceipt
from synapse.util.metrics import Measure, measure_func

logger = logging.getLogger(__name__)
@@ -54,7 +59,7 @@ sent_pdus_destination_dist_total = Counter(


class FederationSender(object):
    def __init__(self, hs):
    def __init__(self, hs: "synapse.server.HomeServer"):
        self.hs = hs
        self.server_name = hs.hostname

@@ -67,7 +72,7 @@ class FederationSender(object):
        self._transaction_manager = TransactionManager(hs)

        # map from destination to PerDestinationQueue
        self._per_destination_queues = {}  # type: dict[str, PerDestinationQueue]
        self._per_destination_queues = {}  # type: Dict[str, PerDestinationQueue]

        LaterGauge(
            "synapse_federation_transaction_queue_pending_destinations",
@@ -83,7 +88,7 @@ class FederationSender(object):
        # Map of user_id -> UserPresenceState for all the pending presence
        # to be sent out by user_id. Entries here get processed and put in
        # pending_presence_by_dest
        self.pending_presence = {}
        self.pending_presence = {}  # type: Dict[str, UserPresenceState]

        LaterGauge(
            "synapse_federation_transaction_queue_pending_pdus",
@@ -115,20 +120,17 @@ class FederationSender(object):
        # and that there is a pending call to _flush_rrs_for_room in the system.
        self._queues_awaiting_rr_flush_by_room = (
            {}
        )  # type: dict[str, set[PerDestinationQueue]]
        )  # type: Dict[str, Set[PerDestinationQueue]]

        self._rr_txn_interval_per_room_ms = (
            1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
            1000.0 / hs.config.federation_rr_transactions_per_room_per_second
        )

    def _get_per_destination_queue(self, destination):
    def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
        """Get or create a PerDestinationQueue for the given destination

        Args:
            destination (str): server_name of remote server

        Returns:
            PerDestinationQueue
            destination: server_name of remote server
        """
        queue = self._per_destination_queues.get(destination)
        if not queue:
@@ -136,7 +138,7 @@ class FederationSender(object):
            self._per_destination_queues[destination] = queue
        return queue

    def notify_new_events(self, current_id):
    def notify_new_events(self, current_id: int) -> None:
        """This gets called when we have some new events we might want to
        send out to other servers.
        """
@@ -150,13 +152,12 @@ class FederationSender(object):
            "process_event_queue_for_federation", self._process_event_queue_loop
        )

    @defer.inlineCallbacks
    def _process_event_queue_loop(self):
    async def _process_event_queue_loop(self) -> None:
        try:
            self._is_processing = True
            while True:
                last_token = yield self.store.get_federation_out_pos("events")
                next_token, events = yield self.store.get_all_new_events_stream(
                last_token = await self.store.get_federation_out_pos("events")
                next_token, events = await self.store.get_all_new_events_stream(
                    last_token, self._last_poked_id, limit=100
                )

@@ -165,8 +166,7 @@ class FederationSender(object):
                if not events and next_token >= self._last_poked_id:
                    break

                @defer.inlineCallbacks
                def handle_event(event):
                async def handle_event(event: EventBase) -> None:
                    # Only send events for this server.
                    send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                    is_mine = self.is_mine_id(event.sender)
@@ -183,7 +183,7 @@ class FederationSender(object):
                        # Otherwise if the last member on a server in a room is
                        # banned then it won't receive the event because it won't
                        # be in the room after the ban.
                        destinations = yield self.state.get_hosts_in_room_at_events(
                        destinations = await self.state.get_hosts_in_room_at_events(
                            event.room_id, event_ids=event.prev_event_ids()
                        )
                    except Exception:
@@ -205,17 +205,16 @@ class FederationSender(object):

                    self._send_pdu(event, destinations)

                @defer.inlineCallbacks
                def handle_room_events(events):
                async def handle_room_events(events: Iterable[EventBase]) -> None:
                    with Measure(self.clock, "handle_room_events"):
                        for event in events:
                            yield handle_event(event)
                            await handle_event(event)

                events_by_room = {}
                events_by_room = {}  # type: Dict[str, List[EventBase]]
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                yield make_deferred_yieldable(
                await make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(handle_room_events, evs)
@@ -225,11 +224,11 @@ class FederationSender(object):
                        )
                    )

                yield self.store.update_federation_out_pos("events", next_token)
                await self.store.update_federation_out_pos("events", next_token)

                if events:
                    now = self.clock.time_msec()
                    ts = yield self.store.get_received_ts(events[-1].event_id)
                    ts = await self.store.get_received_ts(events[-1].event_id)

                    synapse.metrics.event_processing_lag.labels(
                        "federation_sender"
@@ -253,7 +252,7 @@ class FederationSender(object):
        finally:
            self._is_processing = False

    def _send_pdu(self, pdu, destinations):
    def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
        # We loop through all destinations to see whether we already have
        # a transaction in progress. If we do, stick it in the pending_pdus
        # table and we'll get back to it later.
@@ -275,11 +274,11 @@ class FederationSender(object):
            self._get_per_destination_queue(destination).send_pdu(pdu, order)

    @defer.inlineCallbacks
    def send_read_receipt(self, receipt):
    def send_read_receipt(self, receipt: ReadReceipt):
        """Send a RR to any other servers in the room

        Args:
            receipt (synapse.types.ReadReceipt): receipt to be sent
            receipt: receipt to be sent
        """

        # Some background on the rate-limiting going on here.
@@ -342,7 +341,7 @@ class FederationSender(object):
        else:
            queue.flush_read_receipts_for_room(room_id)

    def _schedule_rr_flush_for_room(self, room_id, n_domains):
    def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
        # that is going to cause approximately len(domains) transactions, so now back
        # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
        backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
@@ -351,7 +350,7 @@ class FederationSender(object):
        self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
        self._queues_awaiting_rr_flush_by_room[room_id] = set()

    def _flush_rrs_for_room(self, room_id):
    def _flush_rrs_for_room(self, room_id: str) -> None:
        queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
        logger.debug("Flushing RRs in %s to %s", room_id, queues)

@@ -367,14 +366,11 @@ class FederationSender(object):

    @preserve_fn  # the caller should not yield on this
    @defer.inlineCallbacks
    def send_presence(self, states):
    def send_presence(self, states: List[UserPresenceState]):
        """Send the new presence states to the appropriate destinations.

        This actually queues up the presence states ready for sending and
        triggers a background task to process them and send out the transactions.

        Args:
            states (list(UserPresenceState))
        """
        if not self.hs.config.use_presence:
            # No-op if presence is disabled.
@@ -411,11 +407,10 @@ class FederationSender(object):
        finally:
            self._processing_pending_presence = False

    def send_presence_to_destinations(self, states, destinations):
    def send_presence_to_destinations(
        self, states: List[UserPresenceState], destinations: List[str]
    ) -> None:
        """Send the given presence states to the given destinations.

        Args:
            states (list[UserPresenceState])
            destinations (list[str])
        """

@@ -430,12 +425,9 @@ class FederationSender(object):

    @measure_func("txnqueue._process_presence")
    @defer.inlineCallbacks
    def _process_presence_inner(self, states):
    def _process_presence_inner(self, states: List[UserPresenceState]):
        """Given a list of states populate self.pending_presence_by_dest and
        poke to send a new transaction to each destination

        Args:
            states (list(UserPresenceState))
        """
        hosts_and_states = yield get_interested_remotes(self.store, states, self.state)

@@ -445,14 +437,20 @@ class FederationSender(object):
                continue
            self._get_per_destination_queue(destination).send_presence(states)

    def build_and_send_edu(self, destination, edu_type, content, key=None):
    def build_and_send_edu(
        self,
        destination: str,
        edu_type: str,
        content: dict,
        key: Optional[Hashable] = None,
    ):
        """Construct an Edu object, and queue it for sending

        Args:
            destination (str): name of server to send to
            edu_type (str): type of EDU to send
            content (dict): content of EDU
            key (Any|None): clobbering key for this edu
            destination: name of server to send to
            edu_type: type of EDU to send
            content: content of EDU
            key: clobbering key for this edu
        """
        if destination == self.server_name:
            logger.info("Not sending EDU to ourselves")
@@ -467,12 +465,12 @@ class FederationSender(object):

        self.send_edu(edu, key)

    def send_edu(self, edu, key):
    def send_edu(self, edu: Edu, key: Optional[Hashable]):
        """Queue an EDU for sending

        Args:
            edu (Edu): edu to send
            key (Any|None): clobbering key for this edu
            edu: edu to send
            key: clobbering key for this edu
        """
        queue = self._get_per_destination_queue(edu.destination)
        if key:
@@ -480,12 +478,25 @@ class FederationSender(object):
        else:
            queue.send_edu(edu)

    def send_device_messages(self, destination):
    def send_device_messages(self, destination: str):
        if destination == self.server_name:
            logger.info("Not sending device update to ourselves")
            logger.warning("Not sending device update to ourselves")
            return
||||
|
||||
self._get_per_destination_queue(destination).attempt_new_transaction()
|
||||
|
||||
def get_current_token(self):
|
||||
def wake_destination(self, destination: str):
|
||||
"""Called when we want to retry sending transactions to a remote.
|
||||
|
||||
This is mainly useful if the remote server has been down and we think it
|
||||
might have come back.
|
||||
"""
|
||||
|
||||
if destination == self.server_name:
|
||||
logger.warning("Not waking up ourselves")
|
||||
return
|
||||
|
||||
self._get_per_destination_queue(destination).attempt_new_transaction()
|
||||
|
||||
def get_current_token(self) -> int:
|
||||
return 0
|
||||
|
||||
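The change running through this whole file is the port from Twisted's @defer.inlineCallbacks/yield style to native async/await. A minimal side-by-side sketch of the two styles (illustrative only, not Synapse code; both produce Deferreds that callers can treat identically):

from twisted.internet import defer, task

@defer.inlineCallbacks
def old_style(reactor):
    # Generator-based coroutine: each yield waits on a Deferred.
    yield task.deferLater(reactor, 0.01, lambda: None)
    return "done"

async def new_style(reactor):
    # Native coroutine: await works directly on Deferreds.
    await task.deferLater(reactor, 0.01, lambda: None)
    return "done"

def main(reactor):
    # ensureDeferred adapts a coroutine to a Deferred, so the two styles mix
    # freely during an incremental port like this one.
    return defer.gatherResults(
        [old_style(reactor), defer.ensureDeferred(new_style(reactor))]
    )

if __name__ == "__main__":
    task.react(main)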
@@ -15,11 +15,11 @@
# limitations under the License.
import datetime
import logging
from typing import Dict, Hashable, Iterable, List, Tuple

from prometheus_client import Counter

from twisted.internet import defer

import synapse.server
from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
@@ -31,6 +31,7 @@ from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

# This is defined in the Matrix spec and enforced by the receiver.
@@ -55,13 +56,18 @@ class PerDestinationQueue(object):
    Manages the per-destination transmission queues.

    Args:
        hs (synapse.HomeServer):
        transaction_sender (TransactionManager):
        destination (str): the server_name of the destination that we are managing
        hs
        transaction_sender
        destination: the server_name of the destination that we are managing
            transmission for.
    """

    def __init__(self, hs, transaction_manager, destination):
    def __init__(
        self,
        hs: "synapse.server.HomeServer",
        transaction_manager: "synapse.federation.sender.TransactionManager",
        destination: str,
    ):
        self._server_name = hs.hostname
        self._clock = hs.get_clock()
        self._store = hs.get_datastore()
@@ -71,20 +77,20 @@ class PerDestinationQueue(object):
        self.transmission_loop_running = False

        # a list of tuples of (pending pdu, order)
        self._pending_pdus = []  # type: list[tuple[EventBase, int]]
        self._pending_edus = []  # type: list[Edu]
        self._pending_pdus = []  # type: List[Tuple[EventBase, int]]
        self._pending_edus = []  # type: List[Edu]

        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
        # based on their key (e.g. typing events by room_id)
        # Map of (edu_type, key) -> Edu
        self._pending_edus_keyed = {}  # type: dict[tuple[str, str], Edu]
        self._pending_edus_keyed = {}  # type: Dict[Tuple[str, Hashable], Edu]

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence = {}  # type: dict[str, UserPresenceState]
        self._pending_presence = {}  # type: Dict[str, UserPresenceState]

        # room_id -> receipt_type -> user_id -> receipt_dict
        self._pending_rrs = {}
        self._pending_rrs = {}  # type: Dict[str, Dict[str, Dict[str, dict]]]
        self._rrs_pending_flush = False

        # stream_id of last successfully sent to-device message.
@@ -94,50 +100,50 @@ class PerDestinationQueue(object):
        # stream_id of last successfully sent device list update.
        self._last_device_list_stream_id = 0

    def __str__(self):
    def __str__(self) -> str:
        return "PerDestinationQueue[%s]" % self._destination

    def pending_pdu_count(self):
    def pending_pdu_count(self) -> int:
        return len(self._pending_pdus)

    def pending_edu_count(self):
    def pending_edu_count(self) -> int:
        return (
            len(self._pending_edus)
            + len(self._pending_presence)
            + len(self._pending_edus_keyed)
        )

    def send_pdu(self, pdu, order):
    def send_pdu(self, pdu: EventBase, order: int) -> None:
        """Add a PDU to the queue, and start the transmission loop if necessary

        Args:
            pdu (EventBase): pdu to send
            order (int):
            pdu: pdu to send
            order
        """
        self._pending_pdus.append((pdu, order))
        self.attempt_new_transaction()

    def send_presence(self, states):
    def send_presence(self, states: Iterable[UserPresenceState]) -> None:
        """Add presence updates to the queue. Start the transmission loop if necessary.

        Args:
            states (iterable[UserPresenceState]): presence to send
            states: presence to send
        """
        self._pending_presence.update({state.user_id: state for state in states})
        self.attempt_new_transaction()

    def queue_read_receipt(self, receipt):
    def queue_read_receipt(self, receipt: ReadReceipt) -> None:
        """Add a RR to the list to be sent. Doesn't start the transmission loop yet
        (see flush_read_receipts_for_room)

        Args:
            receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
            receipt: receipt to be queued
        """
        self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
            receipt.receipt_type, {}
        )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}

    def flush_read_receipts_for_room(self, room_id):
    def flush_read_receipts_for_room(self, room_id: str) -> None:
        # if we don't have any read-receipts for this room, it may be that we've already
        # sent them out, so we don't need to flush.
        if room_id not in self._pending_rrs:
@@ -145,15 +151,15 @@ class PerDestinationQueue(object):
        self._rrs_pending_flush = True
        self.attempt_new_transaction()

    def send_keyed_edu(self, edu, key):
    def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
        self._pending_edus_keyed[(edu.edu_type, key)] = edu
        self.attempt_new_transaction()

    def send_edu(self, edu):
    def send_edu(self, edu) -> None:
        self._pending_edus.append(edu)
        self.attempt_new_transaction()

    def attempt_new_transaction(self):
    def attempt_new_transaction(self) -> None:
        """Try to start a new transaction to this destination

        If there is already a transaction in progress to this destination,
@@ -176,23 +182,22 @@ class PerDestinationQueue(object):
            self._transaction_transmission_loop,
        )

    @defer.inlineCallbacks
    def _transaction_transmission_loop(self):
        pending_pdus = []
    async def _transaction_transmission_loop(self) -> None:
        pending_pdus = []  # type: List[Tuple[EventBase, int]]
        try:
            self.transmission_loop_running = True

            # This will throw if we wouldn't retry. We do this here so we fail
            # quickly, but we will later check this again in the http client,
            # hence why we throw the result away.
            yield get_retry_limiter(self._destination, self._clock, self._store)
            await get_retry_limiter(self._destination, self._clock, self._store)

            pending_pdus = []
            while True:
                # We have to keep 2 free slots for presence and rr_edus
                limit = MAX_EDUS_PER_TRANSACTION - 2

                device_update_edus, dev_list_id = yield self._get_device_update_edus(
                device_update_edus, dev_list_id = await self._get_device_update_edus(
                    limit
                )

@@ -201,7 +206,7 @@ class PerDestinationQueue(object):
                (
                    to_device_edus,
                    device_stream_id,
                ) = yield self._get_to_device_message_edus(limit)
                ) = await self._get_to_device_message_edus(limit)

                pending_edus = device_update_edus + to_device_edus

@@ -268,7 +273,7 @@ class PerDestinationQueue(object):

                # END CRITICAL SECTION

                success = yield self._transaction_manager.send_new_transaction(
                success = await self._transaction_manager.send_new_transaction(
                    self._destination, pending_pdus, pending_edus
                )
                if success:
@@ -279,7 +284,7 @@ class PerDestinationQueue(object):
                    # Remove the acknowledged device messages from the database
                    # Only bother if we actually sent some device messages
                    if to_device_edus:
                        yield self._store.delete_device_msgs_for_remote(
                        await self._store.delete_device_msgs_for_remote(
                            self._destination, device_stream_id
                        )

@@ -288,7 +293,7 @@ class PerDestinationQueue(object):
                        logger.info(
                            "Marking as sent %r %r", self._destination, dev_list_id
                        )
                        yield self._store.mark_as_sent_devices_by_remote(
                        await self._store.mark_as_sent_devices_by_remote(
                            self._destination, dev_list_id
                        )

@@ -333,7 +338,7 @@ class PerDestinationQueue(object):
            # We want to be *very* sure we clear this after we stop processing
            self.transmission_loop_running = False

    def _get_rr_edus(self, force_flush):
    def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
        if not self._pending_rrs:
            return
        if not force_flush and not self._rrs_pending_flush:
@@ -350,17 +355,16 @@ class PerDestinationQueue(object):
        self._rrs_pending_flush = False
        yield edu

    def _pop_pending_edus(self, limit):
    def _pop_pending_edus(self, limit: int) -> List[Edu]:
        pending_edus = self._pending_edus
        pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
        return pending_edus

    @defer.inlineCallbacks
    def _get_device_update_edus(self, limit):
    async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_list = self._last_device_list_stream_id

        # Retrieve list of new device updates to send to the destination
        now_stream_id, results = yield self._store.get_device_updates_by_remote(
        now_stream_id, results = await self._store.get_device_updates_by_remote(
            self._destination, last_device_list, limit=limit
        )
        edus = [
@@ -377,11 +381,10 @@ class PerDestinationQueue(object):

        return (edus, now_stream_id)

    @defer.inlineCallbacks
    def _get_to_device_message_edus(self, limit):
    async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_stream_id = self._last_device_stream_id
        to_device_stream_id = self._store.get_to_device_stream_token()
        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
        contents, stream_id = await self._store.get_new_device_msgs_for_remote(
            self._destination, last_device_stream_id, to_device_stream_id, limit
        )
        edus = [
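The _pending_edus_keyed map whose type annotation is fixed above implements EDU "clobbering": EDUs sharing an (edu_type, key) pair overwrite one another, so only the latest typing notification per room is still queued when the next transaction goes out. A standalone sketch of the idea (names are illustrative, not the real API):

from typing import Dict, Hashable, Tuple

class KeyedEduQueue:
    def __init__(self) -> None:
        # (edu_type, key) -> latest EDU content; older entries are clobbered.
        self._pending = {}  # type: Dict[Tuple[str, Hashable], dict]

    def send_keyed_edu(self, edu_type: str, key: Hashable, content: dict) -> None:
        self._pending[(edu_type, key)] = content

    def pop_all(self):
        pending, self._pending = self._pending, {}
        return list(pending.values())

queue = KeyedEduQueue()
# Three typing updates for the same room: only the last one is ever sent.
queue.send_keyed_edu("m.typing", "!room:example.org", {"typing": True})
queue.send_keyed_edu("m.typing", "!room:example.org", {"typing": False})
queue.send_keyed_edu("m.typing", "!room:example.org", {"typing": True})
assert len(queue.pop_all()) == 1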
@@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List

from canonicaljson import json

from twisted.internet import defer

import synapse.server
from synapse.api.errors import HttpResponseException
from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Transaction
from synapse.federation.units import Edu, Transaction
from synapse.logging.opentracing import (
    extract_text_map,
    set_tag,
@@ -39,7 +40,7 @@ class TransactionManager(object):
    shared between PerDestinationQueue objects
    """

    def __init__(self, hs):
    def __init__(self, hs: "synapse.server.HomeServer"):
        self._server_name = hs.hostname
        self.clock = hs.get_clock()  # nb must be called this for @measure_func
        self._store = hs.get_datastore()
@@ -50,8 +51,9 @@ class TransactionManager(object):
        self._next_txn_id = int(self.clock.time_msec())

    @measure_func("_send_new_transaction")
    @defer.inlineCallbacks
    def send_new_transaction(self, destination, pending_pdus, pending_edus):
    async def send_new_transaction(
        self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
    ):

        # Make a transaction-sending opentracing span. This span follows on from
        # all the edus in that transaction. This needs to be done since there is
@@ -127,7 +129,7 @@ class TransactionManager(object):
            return data

        try:
            response = yield self._transport_layer.send_transaction(
            response = await self._transport_layer.send_transaction(
                transaction, json_data_cb
            )
            code = 200
@@ -15,6 +15,7 @@
# limitations under the License.

import logging
from typing import Any, Dict

from six.moves import urllib

@@ -352,7 +353,9 @@ class TransportLayerClient(object):
        else:
            path = _create_v1_path("/publicRooms")

            args = {"include_all_networks": "true" if include_all_networks else "false"}
            args = {
                "include_all_networks": "true" if include_all_networks else "false"
            }  # type: Dict[str, Any]
            if third_party_instance_id:
                args["third_party_instance_id"] = (third_party_instance_id,)
            if limit:
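The `# type: Dict[str, Any]` comment added in this hunk exists because mypy otherwise infers Dict[str, str] from the string-valued literal and rejects the later tuple-valued assignment. A small illustration of the pattern (hypothetical function; the comment-style annotation is presumably kept for Python 3.5 compatibility):

from typing import Any, Dict

def build_args(include_all_networks: bool, third_party_instance_id: str) -> Dict[str, Any]:
    # Without the annotation, mypy infers Dict[str, str] from the literal
    # and flags the tuple assignment below as a type error.
    args = {"include_all_networks": "true" if include_all_networks else "false"}  # type: Dict[str, Any]
    if third_party_instance_id:
        args["third_party_instance_id"] = (third_party_instance_id,)
    return args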
@@ -18,6 +18,7 @@
import functools
import logging
import re
from typing import Optional, Tuple, Type

from twisted.internet.defer import maybeDeferred

@@ -44,6 +45,7 @@ from synapse.logging.opentracing import (
    tags,
    whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
@@ -101,12 +103,17 @@ class NoAuthenticationError(AuthenticationError):


class Authenticator(object):
    def __init__(self, hs):
    def __init__(self, hs: HomeServer):
        self._clock = hs.get_clock()
        self.keyring = hs.get_keyring()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
        self.notifer = hs.get_notifier()

        self.replication_client = None
        if hs.config.worker.worker_app:
            self.replication_client = hs.get_tcp_replication()

    # A method just so we can pass 'self' as the authenticator to the Servlets
    async def authenticate_request(self, request, content):
@@ -151,7 +158,7 @@ class Authenticator(object):
            origin, json_request, now, "Incoming request"
        )

        logger.info("Request from %s", origin)
        logger.debug("Request from %s", origin)
        request.authenticated_entity = origin

        # If we get a valid signed request from the other side, it's probably
@@ -166,6 +173,17 @@ class Authenticator(object):
        try:
            logger.info("Marking origin %r as up", origin)
            await self.store.set_destination_retry_timings(origin, None, 0, 0)

            # Inform the relevant places that the remote server is back up.
            self.notifer.notify_remote_server_up(origin)
            if self.replication_client:
                # If we're on a worker we try and inform master about this. The
                # replication client doesn't hook into the notifier to avoid
                # infinite loops where we send a `REMOTE_SERVER_UP` command to
                # master, which then echoes it back to us which in turn pokes
                # the notifier.
                self.replication_client.send_remote_server_up(origin)

        except Exception:
            logger.exception("Error resetting retry timings on %s", origin)

@@ -250,6 +268,8 @@ class BaseFederationServlet(object):
        returned.
    """

    PATH = ""  # Overridden in subclasses, the regex to match against the path.

    REQUIRE_AUTH = True

    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
@@ -330,9 +350,6 @@ class BaseFederationServlet(object):

            return response

        # Extra logic that functools.wraps() doesn't finish
        new_func.__self__ = func.__self__

        return new_func

    def register(self, server):
@@ -562,7 +579,7 @@ class FederationV1InviteServlet(BaseFederationServlet):
        # state resolution algorithm, and we don't use that for processing
        # invites
        content = await self.handler.on_invite_request(
            origin, content, room_version=RoomVersions.V1.identifier
            origin, content, room_version_id=RoomVersions.V1.identifier
        )

        # V1 federation API is defined to return a content of `[200, {...}]`
@@ -589,7 +606,7 @@ class FederationV2InviteServlet(BaseFederationServlet):
        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state

        content = await self.handler.on_invite_request(
            origin, event, room_version=room_version
            origin, event, room_version_id=room_version
        )
        return 200, content

@@ -807,7 +824,7 @@ class PublicRoomList(BaseFederationServlet):
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = int(content.get("limit", 100))
        limit = int(content.get("limit", 100))  # type: Optional[int]
        since_token = content.get("since", None)
        search_filter = content.get("filter", None)

@@ -954,7 +971,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")

        result = await self.groups_handler.update_room_in_group(
        result = await self.handler.update_room_in_group(
            group_id, requester_user_id, room_id, config_key, content
        )

@@ -1405,11 +1422,13 @@ FEDERATION_SERVLET_CLASSES = (
    On3pidBindServlet,
    FederationVersionServlet,
    RoomComplexityServlet,
)
)  # type: Tuple[Type[BaseFederationServlet], ...]

OPENID_SERVLET_CLASSES = (OpenIdUserInfo,)
OPENID_SERVLET_CLASSES = (
    OpenIdUserInfo,
)  # type: Tuple[Type[BaseFederationServlet], ...]

ROOM_LIST_CLASSES = (PublicRoomList,)
ROOM_LIST_CLASSES = (PublicRoomList,)  # type: Tuple[Type[PublicRoomList], ...]

GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsProfileServlet,
@@ -1430,17 +1449,19 @@ GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsAddRoomsServlet,
    FederationGroupsAddRoomsConfigServlet,
    FederationGroupsSettingJoinPolicyServlet,
)
)  # type: Tuple[Type[BaseFederationServlet], ...]


GROUP_LOCAL_SERVLET_CLASSES = (
    FederationGroupsLocalInviteServlet,
    FederationGroupsRemoveLocalUserServlet,
    FederationGroupsBulkPublicisedServlet,
)
)  # type: Tuple[Type[BaseFederationServlet], ...]


GROUP_ATTESTATION_SERVLET_CLASSES = (FederationGroupsRenewAttestaionServlet,)
GROUP_ATTESTATION_SERVLET_CLASSES = (
    FederationGroupsRenewAttestaionServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]

DEFAULT_SERVLET_GROUPS = (
    "federation",
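The comment block in the Authenticator hunk explains the loop-avoidance design: the replication client deliberately does not hook into the notifier, so a worker can tell master about a recovered remote server without the REMOTE_SERVER_UP command echoing back forever. A schematic sketch of that guard (class and attribute names are illustrative, not the real Synapse API):

class RemoteServerUpNotifier:
    """Fan out 'remote server is back' locally and, on workers, to master."""

    def __init__(self, notifier, replication_client=None):
        self._notifier = notifier
        # Only set when running as a worker process.
        self._replication_client = replication_client

    def on_server_up(self, origin: str) -> None:
        # Always tell local listeners.
        self._notifier.notify_remote_server_up(origin)
        if self._replication_client is not None:
            # On a worker, also tell master. The replication client does not
            # feed back into the notifier, so no REMOTE_SERVER_UP echo loop.
            self._replication_client.send_remote_server_up(origin)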
@@ -19,11 +19,15 @@ server protocol.

import logging

import attr

from synapse.types import JsonDict
from synapse.util.jsonobject import JsonEncodedObject

logger = logging.getLogger(__name__)


@attr.s(slots=True)
class Edu(JsonEncodedObject):
    """ An Edu represents a piece of data sent from one homeserver to another.

@@ -32,11 +36,24 @@ class Edu(JsonEncodedObject):
    internal ID or previous references graph.
    """

    valid_keys = ["origin", "destination", "edu_type", "content"]
    edu_type = attr.ib(type=str)
    content = attr.ib(type=dict)
    origin = attr.ib(type=str)
    destination = attr.ib(type=str)

    required_keys = ["edu_type"]
    def get_dict(self) -> JsonDict:
        return {
            "edu_type": self.edu_type,
            "content": self.content,
        }

    internal_keys = ["origin", "destination"]
    def get_internal_dict(self) -> JsonDict:
        return {
            "edu_type": self.edu_type,
            "content": self.content,
            "origin": self.origin,
            "destination": self.destination,
        }

    def get_context(self):
        return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
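The Edu hunk above trades the JsonEncodedObject key lists (valid_keys/required_keys/internal_keys) for an attrs class with explicit serialisers. A reduced sketch of the resulting shape (trimmed and illustrative, without the JsonEncodedObject base):

import attr

@attr.s(slots=True)
class Edu:
    edu_type = attr.ib(type=str)
    content = attr.ib(type=dict)
    origin = attr.ib(type=str)
    destination = attr.ib(type=str)

    def get_dict(self) -> dict:
        # The over-the-wire form omits routing metadata.
        return {"edu_type": self.edu_type, "content": self.content}

    def get_internal_dict(self) -> dict:
        # The internal form keeps origin/destination for bookkeeping.
        return {
            "edu_type": self.edu_type,
            "content": self.content,
            "origin": self.origin,
            "destination": self.destination,
        }

edu = Edu(edu_type="m.typing", content={"typing": True},
          origin="hs1.example.org", destination="hs2.example.org")
assert "origin" not in edu.get_dict()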
@@ -36,7 +36,7 @@ logger = logging.getLogger(__name__)
# TODO: Flairs


class GroupsServerHandler(object):
class GroupsServerWorkerHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.store = hs.get_datastore()
@@ -51,9 +51,6 @@ class GroupsServerHandler(object):
        self.transport_client = hs.get_federation_transport_client()
        self.profile_handler = hs.get_profile_handler()

        # Ensure attestations get renewed
        hs.get_groups_attestation_renewer()

    @defer.inlineCallbacks
    def check_group_is_ours(
        self, group_id, requester_user_id, and_exists=False, and_is_admin=None
@@ -167,68 +164,6 @@ class GroupsServerHandler(object):
            "user": membership_info,
        }

    @defer.inlineCallbacks
    def update_group_summary_room(
        self, group_id, requester_user_id, room_id, category_id, content
    ):
        """Add/update a room to the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        RoomID.from_string(room_id)  # Ensure valid room id

        order = content.get("order", None)

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_room_to_summary(
            group_id=group_id,
            room_id=room_id,
            category_id=category_id,
            order=order,
            is_public=is_public,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_summary_room(
        self, group_id, requester_user_id, room_id, category_id
    ):
        """Remove a room from the summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_room_from_summary(
            group_id=group_id, room_id=room_id, category_id=category_id
        )

        return {}

    @defer.inlineCallbacks
    def set_group_join_policy(self, group_id, requester_user_id, content):
        """Sets the group join policy.

        Currently supported policies are:
         - "invite": an invite must be received and accepted in order to join.
         - "open": anyone can join.
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        join_policy = _parse_join_policy_from_contents(content)
        if join_policy is None:
            raise SynapseError(400, "No value specified for 'm.join_policy'")

        yield self.store.set_group_join_policy(group_id, join_policy=join_policy)

        return {}

    @defer.inlineCallbacks
    def get_group_categories(self, group_id, requester_user_id):
        """Get all categories in a group (as seen by user)
@@ -248,42 +183,10 @@ class GroupsServerHandler(object):
            group_id=group_id, category_id=category_id
        )

        logger.info("group %s", res)

        return res

    @defer.inlineCallbacks
    def update_group_category(self, group_id, requester_user_id, category_id, content):
        """Add/Update a group category
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        is_public = _parse_visibility_from_contents(content)
        profile = content.get("profile")

        yield self.store.upsert_group_category(
            group_id=group_id,
            category_id=category_id,
            is_public=is_public,
            profile=profile,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_category(self, group_id, requester_user_id, category_id):
        """Delete a group category
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_group_category(
            group_id=group_id, category_id=category_id
        )

        return {}

    @defer.inlineCallbacks
    def get_group_roles(self, group_id, requester_user_id):
        """Get all roles in a group (as seen by user)
@@ -302,74 +205,6 @@ class GroupsServerHandler(object):
        res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
        return res

    @defer.inlineCallbacks
    def update_group_role(self, group_id, requester_user_id, role_id, content):
        """Add/update a role in a group
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        is_public = _parse_visibility_from_contents(content)

        profile = content.get("profile")

        yield self.store.upsert_group_role(
            group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_role(self, group_id, requester_user_id, role_id):
        """Remove role from group
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_group_role(group_id=group_id, role_id=role_id)

        return {}

    @defer.inlineCallbacks
    def update_group_summary_user(
        self, group_id, requester_user_id, user_id, role_id, content
    ):
        """Add/update a user's entry in the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        order = content.get("order", None)

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_user_to_summary(
            group_id=group_id,
            user_id=user_id,
            role_id=role_id,
            order=order,
            is_public=is_public,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
        """Remove a user from the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_user_from_summary(
            group_id=group_id, user_id=user_id, role_id=role_id
        )

        return {}

    @defer.inlineCallbacks
    def get_group_profile(self, group_id, requester_user_id):
        """Get the group profile as seen by requester_user_id
@@ -394,24 +229,6 @@ class GroupsServerHandler(object):
        else:
            raise SynapseError(404, "Unknown group")

    @defer.inlineCallbacks
    def update_group_profile(self, group_id, requester_user_id, content):
        """Update the group profile
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        profile = {}
        for keyname in ("name", "avatar_url", "short_description", "long_description"):
            if keyname in content:
                value = content[keyname]
                if not isinstance(value, string_types):
                    raise SynapseError(400, "%r value is not a string" % (keyname,))
                profile[keyname] = value

        yield self.store.update_group_profile(group_id, profile)

    @defer.inlineCallbacks
    def get_users_in_group(self, group_id, requester_user_id):
        """Get the users in group as seen by requester_user_id.
@@ -530,6 +347,196 @@ class GroupsServerHandler(object):

        return {"chunk": chunk, "total_room_count_estimate": len(room_results)}


class GroupsServerHandler(GroupsServerWorkerHandler):
    def __init__(self, hs):
        super(GroupsServerHandler, self).__init__(hs)

        # Ensure attestations get renewed
        hs.get_groups_attestation_renewer()

    @defer.inlineCallbacks
    def update_group_summary_room(
        self, group_id, requester_user_id, room_id, category_id, content
    ):
        """Add/update a room to the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        RoomID.from_string(room_id)  # Ensure valid room id

        order = content.get("order", None)

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_room_to_summary(
            group_id=group_id,
            room_id=room_id,
            category_id=category_id,
            order=order,
            is_public=is_public,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_summary_room(
        self, group_id, requester_user_id, room_id, category_id
    ):
        """Remove a room from the summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_room_from_summary(
            group_id=group_id, room_id=room_id, category_id=category_id
        )

        return {}

    @defer.inlineCallbacks
    def set_group_join_policy(self, group_id, requester_user_id, content):
        """Sets the group join policy.

        Currently supported policies are:
         - "invite": an invite must be received and accepted in order to join.
         - "open": anyone can join.
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        join_policy = _parse_join_policy_from_contents(content)
        if join_policy is None:
            raise SynapseError(400, "No value specified for 'm.join_policy'")

        yield self.store.set_group_join_policy(group_id, join_policy=join_policy)

        return {}

    @defer.inlineCallbacks
    def update_group_category(self, group_id, requester_user_id, category_id, content):
        """Add/Update a group category
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        is_public = _parse_visibility_from_contents(content)
        profile = content.get("profile")

        yield self.store.upsert_group_category(
            group_id=group_id,
            category_id=category_id,
            is_public=is_public,
            profile=profile,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_category(self, group_id, requester_user_id, category_id):
        """Delete a group category
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_group_category(
            group_id=group_id, category_id=category_id
        )

        return {}

    @defer.inlineCallbacks
    def update_group_role(self, group_id, requester_user_id, role_id, content):
        """Add/update a role in a group
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        is_public = _parse_visibility_from_contents(content)

        profile = content.get("profile")

        yield self.store.upsert_group_role(
            group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_role(self, group_id, requester_user_id, role_id):
        """Remove role from group
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_group_role(group_id=group_id, role_id=role_id)

        return {}

    @defer.inlineCallbacks
    def update_group_summary_user(
        self, group_id, requester_user_id, user_id, role_id, content
    ):
        """Add/update a user's entry in the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        order = content.get("order", None)

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_user_to_summary(
            group_id=group_id,
            user_id=user_id,
            role_id=role_id,
            order=order,
            is_public=is_public,
        )

        return {}

    @defer.inlineCallbacks
    def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
        """Remove a user from the group summary
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_user_from_summary(
            group_id=group_id, user_id=user_id, role_id=role_id
        )

        return {}

    @defer.inlineCallbacks
    def update_group_profile(self, group_id, requester_user_id, content):
        """Update the group profile
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        profile = {}
        for keyname in ("name", "avatar_url", "short_description", "long_description"):
            if keyname in content:
                value = content[keyname]
                if not isinstance(value, string_types):
                    raise SynapseError(400, "%r value is not a string" % (keyname,))
                profile[keyname] = value

        yield self.store.update_group_profile(group_id, profile)

    @defer.inlineCallbacks
    def add_room_to_group(self, group_id, requester_user_id, room_id, content):
        """Add room to group
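The groups_server.py restructuring above follows Synapse's usual worker split: read-only methods live on a WorkerHandler base class that any process can use, and mutating methods move to a subclass that only the main process instantiates. The bare shape of the pattern, as a sketch with illustrative names and store methods:

class GroupsWorkerHandler:
    """Read-only operations; safe to run on any worker process."""

    def __init__(self, store):
        self.store = store

    def get_group_profile(self, group_id):
        return self.store.get_group(group_id)

class GroupsHandler(GroupsWorkerHandler):
    """Adds write operations; instantiated only on the main process."""

    def update_group_profile(self, group_id, profile):
        return self.store.update_group_profile(group_id, profile)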
@@ -25,6 +25,15 @@ from synapse.app import check_bind_error

logger = logging.getLogger(__name__)

ACME_REGISTER_FAIL_ERROR = """
--------------------------------------------------------------------------------
Failed to register with the ACME provider. This is likely happening because the installation
is new, and ACME v1 has been deprecated by Let's Encrypt and disabled for
new installations since November 2019.
At the moment, Synapse doesn't support ACME v2. For more information and alternative
solutions, please read https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
--------------------------------------------------------------------------------"""


class AcmeHandler(object):
    def __init__(self, hs):
@@ -71,7 +80,12 @@ class AcmeHandler(object):
        # want it to control where we save the certificates, we have to reach in
        # and trigger the registration machinery ourselves.
        self._issuer._registered = False
        yield self._issuer._ensure_registered()

        try:
            yield self._issuer._ensure_registered()
        except Exception:
            logger.error(ACME_REGISTER_FAIL_ERROR)
            raise

    @defer.inlineCallbacks
    def provision_certificate(self):
@@ -14,9 +14,11 @@
# limitations under the License.

import logging
from typing import List

from synapse.api.constants import Membership
from synapse.types import RoomStreamToken
from synapse.events import FrozenEvent
from synapse.types import RoomStreamToken, StateMap
from synapse.visibility import filter_events_for_client

from ._base import BaseHandler
@@ -51,68 +53,17 @@ class AdminHandler(BaseHandler):

        return ret

    async def get_users(self):
        """Function to retrieve a list of users in users table.

        Args:
        Returns:
            defer.Deferred: resolves to list[dict[str, Any]]
        """
        ret = await self.store.get_users()

    async def get_user(self, user):
        """Function to get user details"""
        ret = await self.store.get_user_by_id(user.to_string())
        if ret:
            profile = await self.store.get_profileinfo(user.localpart)
            threepids = await self.store.user_get_threepids(user.to_string())
            ret["displayname"] = profile.display_name
            ret["avatar_url"] = profile.avatar_url
            ret["threepids"] = threepids
        return ret

    async def get_users_paginate(self, start, limit, name, guests, deactivated):
        """Function to retrieve a paginated list of users from
        users list. This will return a json list of users.

        Args:
            start (int): start number to begin the query from
            limit (int): number of rows to retrieve
            name (string): filter for user names
            guests (bool): whether to include guest users
            deactivated (bool): whether to include deactivated users
        Returns:
            defer.Deferred: resolves to json list[dict[str, Any]]
        """
        ret = await self.store.get_users_paginate(
            start, limit, name, guests, deactivated
        )

        return ret

    async def search_users(self, term):
        """Function to search users list for one or more users with
        the matched term.

        Args:
            term (str): search term
        Returns:
            defer.Deferred: resolves to list[dict[str, Any]]
        """
        ret = await self.store.search_users(term)

        return ret

    def get_user_server_admin(self, user):
        """
        Get the admin bit on a user.

        Args:
            user_id (UserID): the (necessarily local) user to manipulate
        """
        return self.store.is_server_admin(user)

    def set_user_server_admin(self, user, admin):
        """
        Set the admin bit on a user.

        Args:
            user_id (UserID): the (necessarily local) user to manipulate
            admin (bool): whether or not the user should be an admin of this server
        """
        return self.store.set_server_admin(user, admin)

    async def export_user_data(self, user_id, writer):
        """Write all data we have on the user to the given writer.

@@ -125,7 +76,7 @@ class AdminHandler(BaseHandler):
        The returned value is that returned by `writer.finished()`.
        """
        # Get all rooms the user is in or has been in
        rooms = await self.store.get_rooms_for_user_where_membership_is(
        rooms = await self.store.get_rooms_for_local_user_where_membership_is(
            user_id,
            membership_list=(
                Membership.JOIN,
@@ -250,35 +201,26 @@ class ExfiltrationWriter(object):
    """Interface used to specify how to write exported data.
    """

    def write_events(self, room_id, events):
    def write_events(self, room_id: str, events: List[FrozenEvent]):
        """Write a batch of events for a room.

        Args:
            room_id (str)
            events (list[FrozenEvent])
        """
        pass

    def write_state(self, room_id, event_id, state):
    def write_state(self, room_id: str, event_id: str, state: StateMap[FrozenEvent]):
        """Write the state at the given event in the room.

        This only gets called for backward extremities rather than for each
        event.

        Args:
            room_id (str)
            event_id (str)
            state (dict[tuple[str, str], FrozenEvent])
        """
        pass

    def write_invite(self, room_id, event, state):
    def write_invite(self, room_id: str, event: FrozenEvent, state: StateMap[dict]):
        """Write an invite for the room, with associated invite state.

        Args:
            room_id (str)
            event (FrozenEvent)
            state (dict[tuple[str, str], dict]): A subset of the state at the
            room_id
            event
            state: A subset of the state at the
                invite, with a subset of the event keys (type, state_key
                content and sender)
        """
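Given the typed ExfiltrationWriter interface above, an exporter just implements the three write_* callbacks plus finished(), and export_user_data drives them. A minimal file-based sketch (hypothetical, not the writer that ships with Synapse; it assumes events expose a get_dict() serialiser):

import json
import os

class JsonFileExfiltrationWriter:
    """Writes exported room data as JSON lines under a base directory."""

    def __init__(self, base_dir: str) -> None:
        self.base_dir = base_dir

    def _append(self, name: str, payload) -> None:
        os.makedirs(self.base_dir, exist_ok=True)
        with open(os.path.join(self.base_dir, name), "a") as f:
            f.write(json.dumps(payload) + "\n")

    def write_events(self, room_id, events):
        self._append("events.jsonl",
                     {"room_id": room_id, "events": [e.get_dict() for e in events]})

    def write_state(self, room_id, event_id, state):
        # StateMap keys are (event type, state key) tuples; stringify for JSON.
        self._append("state.jsonl",
                     {"room_id": room_id, "event_id": event_id,
                      "state": {str(k): v.get_dict() for k, v in state.items()}})

    def write_invite(self, room_id, event, state):
        self._append("invites.jsonl", {"room_id": room_id, "event": event.get_dict()})

    def finished(self):
        # export_user_data returns whatever this returns.
        return self.base_dir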
@@ -816,6 +816,14 @@ class AuthHandler(BaseHandler):

    @defer.inlineCallbacks
    def add_threepid(self, user_id, medium, address, validated_at):
        # check if medium has a valid value
        if medium not in ["email", "msisdn"]:
            raise SynapseError(
                code=400,
                msg=("'%s' is not a valid value for 'medium'" % (medium,)),
                errcode=Codes.INVALID_PARAM,
            )

        # 'Canonicalise' email addresses down to lower case.
        # We're now moving towards the homeserver being the entity that
        # is responsible for validating threepids used for resetting passwords
@@ -140,7 +140,7 @@ class DeactivateAccountHandler(BaseHandler):
            user_id (str): The user ID to reject pending invites for.
        """
        user = UserID.from_string(user_id)
        pending_invites = await self.store.get_invited_rooms_for_user(user_id)
        pending_invites = await self.store.get_invited_rooms_for_local_user(user_id)

        for room in pending_invites:
            try:
@@ -26,6 +26,7 @@ from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
@@ -39,6 +40,8 @@ from ._base import BaseHandler

logger = logging.getLogger(__name__)

MAX_DEVICE_DISPLAY_NAME_LEN = 100


class DeviceWorkerHandler(BaseHandler):
    def __init__(self, hs):
@@ -225,6 +228,22 @@ class DeviceWorkerHandler(BaseHandler):

        return result

    @defer.inlineCallbacks
    def on_federation_query_user_devices(self, user_id):
        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
        master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
        self_signing_key = yield self.store.get_e2e_cross_signing_key(
            user_id, "self_signing"
        )

        return {
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
            "master_key": master_key,
            "self_signing_key": self_signing_key,
        }


class DeviceHandler(DeviceWorkerHandler):
    def __init__(self, hs):
@@ -239,9 +258,6 @@ class DeviceHandler(DeviceWorkerHandler):
        federation_registry.register_edu_handler(
            "m.device_list_update", self.device_list_updater.incoming_device_list_update
        )
        federation_registry.register_query_handler(
            "user_devices", self.on_federation_query_user_devices
        )

        hs.get_distributor().observe("user_left_room", self.user_left_room)

@@ -391,9 +407,18 @@ class DeviceHandler(DeviceWorkerHandler):
            defer.Deferred:
        """

        # Reject a new displayname which is too long.
        new_display_name = content.get("display_name")
        if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN:
            raise SynapseError(
                400,
                "Device display name is too long (max %i)"
                % (MAX_DEVICE_DISPLAY_NAME_LEN,),
            )

        try:
            yield self.store.update_device(
                user_id, device_id, new_display_name=content.get("display_name")
                user_id, device_id, new_display_name=new_display_name
            )
            yield self.notify_device_update(user_id, [device_id])
        except errors.StoreError as e:
@@ -456,22 +481,6 @@ class DeviceHandler(DeviceWorkerHandler):

        self.notifier.on_new_event("device_list_key", position, users=[from_user_id])

    @defer.inlineCallbacks
    def on_federation_query_user_devices(self, user_id):
        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
        master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
        self_signing_key = yield self.store.get_e2e_cross_signing_key(
            user_id, "self_signing"
        )

        return {
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
            "master_key": master_key,
            "self_signing_key": self_signing_key,
        }

    @defer.inlineCallbacks
    def user_left_room(self, user, room_id):
        user_id = user.to_string()
@@ -598,7 +607,13 @@ class DeviceListUpdater(object):
        # happens if we've missed updates.
        resync = yield self._need_to_do_resync(user_id, pending_updates)

        logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
        if logger.isEnabledFor(logging.INFO):
            logger.info(
                "Received device list update for %s, requiring resync: %s. Devices: %s",
                user_id,
                resync,
                ", ".join(u[0] for u in pending_updates),
            )

        if resync:
            yield self.user_device_resync(user_id)
@@ -14,12 +14,14 @@
# limitations under the License.

import logging
from typing import Any, Dict

from canonicaljson import json

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
    get_active_span_text_map,
    log_kv,
@@ -47,6 +49,8 @@ class DeviceMessageHandler(object):
            "m.direct_to_device", self.on_direct_to_device_edu
        )

        self._device_list_updater = hs.get_device_handler().device_list_updater

    @defer.inlineCallbacks
    def on_direct_to_device_edu(self, origin, content):
        local_messages = {}
@@ -65,6 +69,9 @@ class DeviceMessageHandler(object):
                logger.warning("Request for keys for non-local user %s", user_id)
                raise SynapseError(400, "Not a user here")

            if not by_device:
                continue

            messages_by_device = {
                device_id: {
                    "content": message_content,
@@ -73,8 +80,11 @@ class DeviceMessageHandler(object):
                }
                for device_id, message_content in by_device.items()
            }
            if messages_by_device:
                local_messages[user_id] = messages_by_device
            local_messages[user_id] = messages_by_device

            yield self._check_for_unknown_devices(
                message_type, sender_user_id, by_device
            )

        stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
            origin, message_id, local_messages
@@ -84,6 +94,55 @@ class DeviceMessageHandler(object):
            "to_device_key", stream_id, users=local_messages.keys()
        )

    @defer.inlineCallbacks
    def _check_for_unknown_devices(
        self,
        message_type: str,
        sender_user_id: str,
        by_device: Dict[str, Dict[str, Any]],
    ):
        """Checks inbound device messages for unknown remote devices, and if
        found marks the remote cache for the user as stale.
        """

        if message_type != "m.room_key_request":
            return

        # Get the sending device IDs
        requesting_device_ids = set()
        for message_content in by_device.values():
            device_id = message_content.get("requesting_device_id")
            requesting_device_ids.add(device_id)

        # Check if we are tracking the devices of the remote user.
        room_ids = yield self.store.get_rooms_for_user(sender_user_id)
        if not room_ids:
            logger.info(
                "Received device message from remote device we don't"
                " share a room with: %s %s",
                sender_user_id,
                requesting_device_ids,
            )
            return

        # If we are tracking check that we know about the sending
        # devices.
        cached_devices = yield self.store.get_cached_devices_for_user(sender_user_id)

        unknown_devices = requesting_device_ids - set(cached_devices)
        if unknown_devices:
            logger.info(
                "Received device message from remote device not in our cache: %s %s",
                sender_user_id,
                unknown_devices,
            )
            yield self.store.mark_remote_user_device_cache_as_stale(sender_user_id)

            # Immediately attempt a resync in the background
            run_in_background(
                self._device_list_updater.user_device_resync, sender_user_id
            )

    @defer.inlineCallbacks
    def send_device_message(self, sender_user_id, message_type, messages):
        set_tag("number_of_messages", len(messages))
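The new _check_for_unknown_devices above boils down to a set difference between the device IDs a remote user claims and the device IDs in our local cache. The core computation as a standalone sketch (illustrative names, not the real handler):

def find_unknown_devices(by_device: dict, cached_device_ids: set) -> set:
    """Return sender device IDs we have never seen in our device-list cache."""
    requesting = {
        content.get("requesting_device_id") for content in by_device.values()
    }
    return requesting - cached_device_ids

# A key request from device DEV2 that our cache has never heard of:
messages = {"DEV2": {"requesting_device_id": "DEV2", "action": "request"}}
stale = find_unknown_devices(messages, cached_device_ids={"DEV1"})
assert stale == {"DEV2"}  # caller would now mark the cache stale and resync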
@@ -16,6 +16,7 @@

import logging
import string
from typing import List

from twisted.internet import defer

@@ -28,7 +29,7 @@ from synapse.api.errors import (
    StoreError,
    SynapseError,
)
from synapse.types import RoomAlias, UserID, get_domain_from_id
from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id

from ._base import BaseHandler

@@ -81,13 +82,7 @@ class DirectoryHandler(BaseHandler):

    @defer.inlineCallbacks
    def create_association(
        self,
        requester,
        room_alias,
        room_id,
        servers=None,
        send_event=True,
        check_membership=True,
        self, requester, room_alias, room_id, servers=None, check_membership=True,
    ):
        """Attempt to create a new alias

@@ -97,7 +92,6 @@ class DirectoryHandler(BaseHandler):
            room_id (str)
            servers (list[str]|None): List of servers that other servers
                should try and join via
            send_event (bool): Whether to send an updated m.room.aliases event
            check_membership (bool): Whether to check if the user is in the room
                before the alias can be set (if the server's config requires it).

@@ -150,11 +144,9 @@ class DirectoryHandler(BaseHandler):
            )

        yield self._create_association(room_alias, room_id, servers, creator=user_id)
        if send_event:
            yield self.send_room_alias_update_event(requester, room_id)

    @defer.inlineCallbacks
    def delete_association(self, requester, room_alias, send_event=True):
    def delete_association(self, requester, room_alias):
        """Remove an alias from the directory

        (this is only meant for human users; AS users should call
@@ -163,9 +155,6 @@ class DirectoryHandler(BaseHandler):
        Args:
            requester (Requester):
            room_alias (RoomAlias):
            send_event (bool): Whether to send an updated m.room.aliases event.
                Note that, if we delete the canonical alias, we will always attempt
                to send an m.room.canonical_alias event

        Returns:
            Deferred[unicode]: room id that the alias used to point to
@@ -201,9 +190,6 @@ class DirectoryHandler(BaseHandler):
        room_id = yield self._delete_association(room_alias)

        try:
            if send_event:
                yield self.send_room_alias_update_event(requester, room_id)

            yield self._update_canonical_alias(
                requester, requester.user.to_string(), room_id, room_alias
            )
@@ -314,25 +300,50 @@ class DirectoryHandler(BaseHandler):

    @defer.inlineCallbacks
    def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
        """
        Send an updated canonical alias event if the removed alias was set as
        the canonical alias or listed in the alt_aliases field.
        """
        alias_event = yield self.state.get_current_state(
            room_id, EventTypes.CanonicalAlias, ""
        )

        alias_str = room_alias.to_string()
        if not alias_event or alias_event.content.get("alias", "") != alias_str:
        # There is no canonical alias, nothing to do.
        if not alias_event:
            return

        yield self.event_creation_handler.create_and_send_nonmember_event(
            requester,
            {
                "type": EventTypes.CanonicalAlias,
                "state_key": "",
                "room_id": room_id,
                "sender": user_id,
                "content": {},
            },
            ratelimit=False,
        )
        # Obtain a mutable version of the event content.
        content = dict(alias_event.content)
        send_update = False

        # Remove the alias property if it matches the removed alias.
        alias_str = room_alias.to_string()
        if alias_event.content.get("alias", "") == alias_str:
            send_update = True
            content.pop("alias", "")

        # Filter alt_aliases for the removed alias.
        alt_aliases = content.pop("alt_aliases", None)
        # If the aliases are not a list (or not found) do not attempt to modify
        # the list.
        if isinstance(alt_aliases, list):
            send_update = True
            alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
            if alt_aliases:
                content["alt_aliases"] = alt_aliases

        if send_update:
            yield self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.CanonicalAlias,
                    "state_key": "",
                    "room_id": room_id,
                    "sender": user_id,
                    "content": content,
                },
                ratelimit=False,
            )

    @defer.inlineCallbacks
    def get_association_from_room_alias(self, room_alias):
@@ -442,3 +453,19 @@ class DirectoryHandler(BaseHandler):
        yield self.store.set_room_is_public_appservice(
            room_id, appservice_id, network_id, visibility == "public"
        )

    async def get_aliases_for_room(
        self, requester: Requester, room_id: str
    ) -> List[str]:
        """
        Get a list of the aliases that currently point to this room on this server
        """
        # allow access to server admins and current members of the room
        is_admin = await self.auth.is_server_admin(requester.user)
        if not is_admin:
            await self.auth.check_user_in_room_or_world_readable(
                room_id, requester.user.to_string()
            )

        aliases = await self.store.get_aliases_for_room(room_id)
        return aliases
@@ -208,8 +208,9 @@ class E2eKeysHandler(object):
|
||||
)
|
||||
|
||||
user_devices = user_devices["devices"]
|
||||
user_results = results.setdefault(user_id, {})
|
||||
for device in user_devices:
|
||||
results[user_id] = {device["device_id"]: device["keys"]}
|
||||
user_results[device["device_id"]] = device["keys"]
|
||||
user_ids_updated.append(user_id)
|
||||
except Exception as e:
|
||||
failures[destination] = _exception_to_failure(e)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -63,7 +63,7 @@ def _create_rerouter(func_name):
|
||||
return f
|
||||
|
||||
|
||||
class GroupsLocalHandler(object):
|
||||
class GroupsLocalWorkerHandler(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
@@ -81,40 +81,17 @@ class GroupsLocalHandler(object):
|
||||
|
||||
self.profile_handler = hs.get_profile_handler()
|
||||
|
||||
# Ensure attestations get renewed
|
||||
hs.get_groups_attestation_renewer()
|
||||
|
||||
# The following functions merely route the query to the local groups server
|
||||
# or federation depending on if the group is local or remote
|
||||
|
||||
get_group_profile = _create_rerouter("get_group_profile")
|
||||
update_group_profile = _create_rerouter("update_group_profile")
|
||||
get_rooms_in_group = _create_rerouter("get_rooms_in_group")
|
||||
|
||||
get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
|
||||
|
||||
add_room_to_group = _create_rerouter("add_room_to_group")
|
||||
update_room_in_group = _create_rerouter("update_room_in_group")
|
||||
remove_room_from_group = _create_rerouter("remove_room_from_group")
|
||||
|
||||
update_group_summary_room = _create_rerouter("update_group_summary_room")
|
||||
delete_group_summary_room = _create_rerouter("delete_group_summary_room")
|
||||
|
||||
update_group_category = _create_rerouter("update_group_category")
|
||||
delete_group_category = _create_rerouter("delete_group_category")
|
||||
get_group_category = _create_rerouter("get_group_category")
|
||||
get_group_categories = _create_rerouter("get_group_categories")
|
||||
|
||||
update_group_summary_user = _create_rerouter("update_group_summary_user")
|
||||
delete_group_summary_user = _create_rerouter("delete_group_summary_user")
|
||||
|
||||
update_group_role = _create_rerouter("update_group_role")
|
||||
delete_group_role = _create_rerouter("delete_group_role")
|
||||
get_group_role = _create_rerouter("get_group_role")
|
||||
get_group_roles = _create_rerouter("get_group_roles")
|
||||
|
||||
set_group_join_policy = _create_rerouter("set_group_join_policy")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_summary(self, group_id, requester_user_id):
|
||||
"""Get the group summary for a group.
|
||||
@@ -130,6 +107,8 @@ class GroupsLocalHandler(object):
|
||||
res = yield self.transport_client.get_group_summary(
|
||||
get_domain_from_id(group_id), group_id, requester_user_id
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -167,6 +146,144 @@ class GroupsLocalHandler(object):
|
||||
|
||||
return res
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_in_group(self, group_id, requester_user_id):
|
||||
"""Get users in a group
|
||||
"""
|
||||
if self.is_mine_id(group_id):
|
||||
res = yield self.groups_server_handler.get_users_in_group(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
return res
|
||||
|
||||
group_server_name = get_domain_from_id(group_id)
|
||||
|
||||
try:
|
||||
res = yield self.transport_client.get_users_in_group(
|
||||
get_domain_from_id(group_id), group_id, requester_user_id
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
chunk = res["chunk"]
|
||||
valid_entries = []
|
||||
for entry in chunk:
|
||||
g_user_id = entry["user_id"]
|
||||
attestation = entry.pop("attestation", {})
|
||||
try:
|
||||
if get_domain_from_id(g_user_id) != group_server_name:
|
||||
yield self.attestations.verify_attestation(
|
||||
attestation,
|
||||
group_id=group_id,
|
||||
user_id=g_user_id,
|
||||
server_name=get_domain_from_id(g_user_id),
|
||||
)
|
||||
valid_entries.append(entry)
|
||||
except Exception as e:
|
||||
logger.info("Failed to verify user is in group: %s", e)
|
||||
|
||||
res["chunk"] = valid_entries
|
||||
|
||||
return res
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_joined_groups(self, user_id):
|
||||
group_ids = yield self.store.get_joined_groups(user_id)
|
||||
return {"groups": group_ids}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_publicised_groups_for_user(self, user_id):
|
||||
if self.hs.is_mine_id(user_id):
|
||||
result = yield self.store.get_publicised_groups_for_user(user_id)
|
||||
|
||||
# Check AS associated groups for this user - this depends on the
|
||||
# RegExps in the AS registration file (under `users`)
|
||||
for app_service in self.store.get_app_services():
|
||||
result.extend(app_service.get_groups_for_user(user_id))
|
||||
|
||||
return {"groups": result}
|
||||
else:
|
||||
try:
|
||||
bulk_result = yield self.transport_client.bulk_get_publicised_groups(
|
||||
get_domain_from_id(user_id), [user_id]
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
result = bulk_result.get("users", {}).get(user_id)
|
||||
# TODO: Verify attestations
|
||||
return {"groups": result}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def bulk_get_publicised_groups(self, user_ids, proxy=True):
|
||||
destinations = {}
|
||||
local_users = set()
|
||||
|
||||
for user_id in user_ids:
|
||||
if self.hs.is_mine_id(user_id):
|
||||
local_users.add(user_id)
|
||||
else:
|
||||
destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
|
||||
|
||||
if not proxy and destinations:
|
||||
raise SynapseError(400, "Some user_ids are not local")
|
||||
|
||||
results = {}
|
||||
failed_results = []
|
||||
for destination, dest_user_ids in iteritems(destinations):
|
||||
try:
|
||||
r = yield self.transport_client.bulk_get_publicised_groups(
|
||||
destination, list(dest_user_ids)
|
||||
)
|
||||
results.update(r["users"])
|
||||
except Exception:
|
||||
failed_results.extend(dest_user_ids)
|
||||
|
||||
for uid in local_users:
|
||||
results[uid] = yield self.store.get_publicised_groups_for_user(uid)
|
||||
|
||||
# Check AS associated groups for this user - this depends on the
|
||||
# RegExps in the AS registration file (under `users`)
|
||||
for app_service in self.store.get_app_services():
|
||||
results[uid].extend(app_service.get_groups_for_user(uid))
|
||||
|
||||
return {"users": results}
|
||||
|
||||
|
||||
class GroupsLocalHandler(GroupsLocalWorkerHandler):
|
||||
def __init__(self, hs):
|
||||
super(GroupsLocalHandler, self).__init__(hs)
|
||||
|
||||
# Ensure attestations get renewed
|
||||
hs.get_groups_attestation_renewer()
|
||||
|
||||
# The following functions merely route the query to the local groups server
|
||||
# or federation depending on if the group is local or remote
|
||||
|
||||
update_group_profile = _create_rerouter("update_group_profile")
|
||||
|
||||
add_room_to_group = _create_rerouter("add_room_to_group")
|
||||
update_room_in_group = _create_rerouter("update_room_in_group")
|
||||
remove_room_from_group = _create_rerouter("remove_room_from_group")
|
||||
|
||||
update_group_summary_room = _create_rerouter("update_group_summary_room")
|
||||
delete_group_summary_room = _create_rerouter("delete_group_summary_room")
|
||||
|
||||
update_group_category = _create_rerouter("update_group_category")
|
||||
delete_group_category = _create_rerouter("delete_group_category")
|
||||
|
||||
update_group_summary_user = _create_rerouter("update_group_summary_user")
|
||||
delete_group_summary_user = _create_rerouter("delete_group_summary_user")
|
||||
|
||||
update_group_role = _create_rerouter("update_group_role")
|
||||
delete_group_role = _create_rerouter("delete_group_role")
|
||||
|
||||
set_group_join_policy = _create_rerouter("set_group_join_policy")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def create_group(self, group_id, user_id, content):
|
||||
"""Create a group
|
||||
@@ -190,6 +307,8 @@ class GroupsLocalHandler(object):
|
||||
res = yield self.transport_client.create_group(
|
||||
get_domain_from_id(group_id), group_id, user_id, content
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -215,46 +334,6 @@ class GroupsLocalHandler(object):
|
||||
|
||||
return res
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_in_group(self, group_id, requester_user_id):
|
||||
"""Get users in a group
|
||||
"""
|
||||
if self.is_mine_id(group_id):
|
||||
res = yield self.groups_server_handler.get_users_in_group(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
return res
|
||||
|
||||
group_server_name = get_domain_from_id(group_id)
|
||||
|
||||
try:
|
||||
res = yield self.transport_client.get_users_in_group(
|
||||
get_domain_from_id(group_id), group_id, requester_user_id
|
||||
)
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
chunk = res["chunk"]
|
||||
valid_entries = []
|
||||
for entry in chunk:
|
||||
g_user_id = entry["user_id"]
|
||||
attestation = entry.pop("attestation", {})
|
||||
try:
|
||||
if get_domain_from_id(g_user_id) != group_server_name:
|
||||
yield self.attestations.verify_attestation(
|
||||
attestation,
|
||||
group_id=group_id,
|
||||
user_id=g_user_id,
|
||||
server_name=get_domain_from_id(g_user_id),
|
||||
)
|
||||
valid_entries.append(entry)
|
||||
except Exception as e:
|
||||
logger.info("Failed to verify user is in group: %s", e)
|
||||
|
||||
res["chunk"] = valid_entries
|
||||
|
||||
return res
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def join_group(self, group_id, user_id, content):
|
||||
"""Request to join a group
|
||||
@@ -271,6 +350,8 @@ class GroupsLocalHandler(object):
|
||||
res = yield self.transport_client.join_group(
|
||||
get_domain_from_id(group_id), group_id, user_id, content
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -315,6 +396,8 @@ class GroupsLocalHandler(object):
|
||||
res = yield self.transport_client.accept_group_invite(
|
||||
get_domain_from_id(group_id), group_id, user_id, content
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -361,6 +444,8 @@ class GroupsLocalHandler(object):
|
||||
requester_user_id,
|
||||
content,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -424,6 +509,8 @@ class GroupsLocalHandler(object):
|
||||
user_id,
|
||||
content,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error()
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
@@ -438,66 +525,3 @@ class GroupsLocalHandler(object):
|
||||
group_id, user_id, membership="leave"
|
||||
)
|
||||
self.notifier.on_new_event("groups_key", token, users=[user_id])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_joined_groups(self, user_id):
|
||||
group_ids = yield self.store.get_joined_groups(user_id)
|
||||
return {"groups": group_ids}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_publicised_groups_for_user(self, user_id):
|
||||
if self.hs.is_mine_id(user_id):
|
||||
result = yield self.store.get_publicised_groups_for_user(user_id)
|
||||
|
||||
# Check AS associated groups for this user - this depends on the
|
||||
# RegExps in the AS registration file (under `users`)
|
||||
for app_service in self.store.get_app_services():
|
||||
result.extend(app_service.get_groups_for_user(user_id))
|
||||
|
||||
return {"groups": result}
|
||||
else:
|
||||
try:
|
||||
bulk_result = yield self.transport_client.bulk_get_publicised_groups(
|
||||
get_domain_from_id(user_id), [user_id]
|
||||
)
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to contact group server")
|
||||
|
||||
result = bulk_result.get("users", {}).get(user_id)
|
||||
# TODO: Verify attestations
|
||||
return {"groups": result}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def bulk_get_publicised_groups(self, user_ids, proxy=True):
|
||||
destinations = {}
|
||||
local_users = set()
|
||||
|
||||
for user_id in user_ids:
|
||||
if self.hs.is_mine_id(user_id):
|
||||
local_users.add(user_id)
|
||||
else:
|
||||
destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
|
||||
|
||||
if not proxy and destinations:
|
||||
raise SynapseError(400, "Some user_ids are not local")
|
||||
|
||||
results = {}
|
||||
failed_results = []
|
||||
for destination, dest_user_ids in iteritems(destinations):
|
||||
try:
|
||||
r = yield self.transport_client.bulk_get_publicised_groups(
|
||||
destination, list(dest_user_ids)
|
||||
)
|
||||
results.update(r["users"])
|
||||
except Exception:
|
||||
failed_results.extend(dest_user_ids)
|
||||
|
||||
for uid in local_users:
|
||||
results[uid] = yield self.store.get_publicised_groups_for_user(uid)
|
||||
|
||||
# Check AS associated groups for this user - this depends on the
|
||||
# RegExps in the AS registration file (under `users`)
|
||||
for app_service in self.store.get_app_services():
|
||||
results[uid].extend(app_service.get_groups_for_user(uid))
|
||||
|
||||
return {"users": results}
|
||||
|
||||
@@ -38,7 +38,7 @@ from synapse.api.errors import (
|
||||
from synapse.config.emailconfig import ThreepidBehaviour
|
||||
from synapse.http.client import SimpleHttpClient
|
||||
from synapse.util.hash import sha256_and_url_safe_base64
|
||||
from synapse.util.stringutils import random_string
|
||||
from synapse.util.stringutils import assert_valid_client_secret, random_string
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
@@ -84,6 +84,8 @@ class IdentityHandler(BaseHandler):
|
||||
raise SynapseError(
|
||||
400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
|
||||
)
|
||||
assert_valid_client_secret(client_secret)
|
||||
|
||||
session_id = creds.get("sid")
|
||||
if not session_id:
|
||||
raise SynapseError(
|
||||
|
||||
@@ -18,7 +18,7 @@ import logging
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.api.errors import AuthError, Codes, SynapseError
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.handlers.presence import format_user_presence_state
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
@@ -101,7 +101,7 @@ class InitialSyncHandler(BaseHandler):
|
||||
if include_archived:
|
||||
memberships.append(Membership.LEAVE)
|
||||
|
||||
room_list = await self.store.get_rooms_for_user_where_membership_is(
|
||||
room_list = await self.store.get_rooms_for_local_user_where_membership_is(
|
||||
user_id=user_id, membership_list=memberships
|
||||
)
|
||||
|
||||
@@ -274,8 +274,11 @@ class InitialSyncHandler(BaseHandler):
|
||||
|
||||
user_id = requester.user.to_string()
|
||||
|
||||
membership, member_event_id = await self._check_in_room_or_world_readable(
|
||||
room_id, user_id
|
||||
(
|
||||
membership,
|
||||
member_event_id,
|
||||
) = await self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, user_id, allow_departed_users=True,
|
||||
)
|
||||
is_peeking = member_event_id is None
|
||||
|
||||
@@ -433,25 +436,3 @@ class InitialSyncHandler(BaseHandler):
|
||||
ret["membership"] = membership
|
||||
|
||||
return ret
|
||||
|
||||
async def _check_in_room_or_world_readable(self, room_id, user_id):
|
||||
try:
|
||||
# check_user_was_in_room will return the most recent membership
|
||||
# event for the user if:
|
||||
# * The user is a non-guest user, and was ever in the room
|
||||
# * The user is a guest user, and has joined the room
|
||||
# else it will throw.
|
||||
member_event = await self.auth.check_user_was_in_room(room_id, user_id)
|
||||
return member_event.membership, member_event.event_id
|
||||
except AuthError:
|
||||
visibility = await self.state_handler.get_current_state(
|
||||
room_id, EventTypes.RoomHistoryVisibility, ""
|
||||
)
|
||||
if (
|
||||
visibility
|
||||
and visibility.content["history_visibility"] == "world_readable"
|
||||
):
|
||||
return Membership.JOIN, None
|
||||
raise AuthError(
|
||||
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
||||
)
|
||||
|
||||
@@ -40,7 +40,7 @@ from synapse.api.errors import (
|
||||
NotFoundError,
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.api.room_versions import RoomVersions
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
|
||||
from synapse.api.urls import ConsentURIBuilder
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.logging.context import run_in_background
|
||||
@@ -99,7 +99,9 @@ class MessageHandler(object):
|
||||
(
|
||||
membership,
|
||||
membership_event_id,
|
||||
) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
|
||||
) = yield self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, user_id, allow_departed_users=True
|
||||
)
|
||||
|
||||
if membership == Membership.JOIN:
|
||||
data = yield self.state.get_current_state(room_id, event_type, state_key)
|
||||
@@ -177,7 +179,9 @@ class MessageHandler(object):
|
||||
(
|
||||
membership,
|
||||
membership_event_id,
|
||||
) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
|
||||
) = yield self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, user_id, allow_departed_users=True
|
||||
)
|
||||
|
||||
if membership == Membership.JOIN:
|
||||
state_ids = yield self.store.get_filtered_current_state_ids(
|
||||
@@ -216,8 +220,8 @@ class MessageHandler(object):
|
||||
if not requester.app_service:
|
||||
# We check AS auth after fetching the room membership, as it
|
||||
# requires us to pull out all joined members anyway.
|
||||
membership, _ = yield self.auth.check_in_room_or_world_readable(
|
||||
room_id, user_id
|
||||
membership, _ = yield self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, user_id, allow_departed_users=True
|
||||
)
|
||||
if membership != Membership.JOIN:
|
||||
raise NotImplementedError(
|
||||
@@ -459,7 +463,9 @@ class EventCreationHandler(object):
|
||||
room_version = event_dict["content"]["room_version"]
|
||||
else:
|
||||
try:
|
||||
room_version = yield self.store.get_room_version(event_dict["room_id"])
|
||||
room_version = yield self.store.get_room_version_id(
|
||||
event_dict["room_id"]
|
||||
)
|
||||
except NotFoundError:
|
||||
raise AuthError(403, "Unknown room")
|
||||
|
||||
@@ -788,7 +794,7 @@ class EventCreationHandler(object):
|
||||
):
|
||||
room_version = event.content.get("room_version", RoomVersions.V1.identifier)
|
||||
else:
|
||||
room_version = yield self.store.get_room_version(event.room_id)
|
||||
room_version = yield self.store.get_room_version_id(event.room_id)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context
|
||||
@@ -930,10 +936,9 @@ class EventCreationHandler(object):
|
||||
# way? If we have been invited by a remote server, we need
|
||||
# to get them to sign the event.
|
||||
|
||||
returned_invite = yield federation_handler.send_invite(
|
||||
invitee.domain, event
|
||||
returned_invite = yield defer.ensureDeferred(
|
||||
federation_handler.send_invite(invitee.domain, event)
|
||||
)
|
||||
|
||||
event.unsigned.pop("room_state", None)
|
||||
|
||||
# TODO: Make sure the signatures actually are correct.
|
||||
@@ -962,9 +967,13 @@ class EventCreationHandler(object):
|
||||
)
|
||||
auth_events = yield self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
|
||||
room_version = yield self.store.get_room_version(event.room_id)
|
||||
|
||||
if event_auth.check_redaction(room_version, event, auth_events=auth_events):
|
||||
room_version = yield self.store.get_room_version_id(event.room_id)
|
||||
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
|
||||
|
||||
if event_auth.check_redaction(
|
||||
room_version_obj, event, auth_events=auth_events
|
||||
):
|
||||
# this user doesn't have 'redact' rights, so we need to do some more
|
||||
# checks on the original event. Let's start by checking the original
|
||||
# event exists.
|
||||
|
||||
@@ -88,6 +88,8 @@ class PaginationHandler(object):
|
||||
if hs.config.retention_enabled:
|
||||
# Run the purge jobs described in the configuration file.
|
||||
for job in hs.config.retention_purge_jobs:
|
||||
logger.info("Setting up purge job with config: %s", job)
|
||||
|
||||
self.clock.looping_call(
|
||||
run_as_background_process,
|
||||
job["interval"],
|
||||
@@ -130,11 +132,22 @@ class PaginationHandler(object):
|
||||
else:
|
||||
include_null = False
|
||||
|
||||
logger.info(
|
||||
"[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)",
|
||||
min_ms,
|
||||
max_ms,
|
||||
include_null,
|
||||
)
|
||||
|
||||
rooms = yield self.store.get_rooms_for_retention_period_in_range(
|
||||
min_ms, max_ms, include_null
|
||||
)
|
||||
|
||||
logger.debug("[purge] Rooms to purge: %s", rooms)
|
||||
|
||||
for room_id, retention_policy in iteritems(rooms):
|
||||
logger.info("[purge] Attempting to purge messages in room %s", room_id)
|
||||
|
||||
if room_id in self._purges_in_progress_by_room:
|
||||
logger.warning(
|
||||
"[purge] not purging room %s as there's an ongoing purge running"
|
||||
@@ -156,7 +169,7 @@ class PaginationHandler(object):
|
||||
|
||||
stream_ordering = yield self.store.find_first_stream_ordering_after_ts(ts)
|
||||
|
||||
r = yield self.store.get_room_event_after_stream_ordering(
|
||||
r = yield self.store.get_room_event_before_stream_ordering(
|
||||
room_id, stream_ordering,
|
||||
)
|
||||
if not r:
|
||||
@@ -268,7 +281,7 @@ class PaginationHandler(object):
|
||||
"""Purge the given room from the database"""
|
||||
with (await self.pagination_lock.write(room_id)):
|
||||
# check we know about the room
|
||||
await self.store.get_room_version(room_id)
|
||||
await self.store.get_room_version_id(room_id)
|
||||
|
||||
# first check that we have no users in this room
|
||||
joined = await defer.maybeDeferred(
|
||||
@@ -322,7 +335,9 @@ class PaginationHandler(object):
|
||||
(
|
||||
membership,
|
||||
member_event_id,
|
||||
) = await self.auth.check_in_room_or_world_readable(room_id, user_id)
|
||||
) = await self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, user_id, allow_departed_users=True
|
||||
)
|
||||
|
||||
if source_config.direction == "b":
|
||||
# if we're going backwards, we might need to backfill. This
|
||||
|
||||
@@ -20,13 +20,7 @@ from twisted.internet import defer
|
||||
|
||||
from synapse import types
|
||||
from synapse.api.constants import MAX_USERID_LENGTH, LoginType
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
Codes,
|
||||
ConsentNotGivenError,
|
||||
RegistrationError,
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
|
||||
from synapse.config.server import is_threepid_reserved
|
||||
from synapse.http.servlet import assert_params_in_dict
|
||||
from synapse.replication.http.login import RegisterDeviceReplicationServlet
|
||||
@@ -165,7 +159,7 @@ class RegistrationHandler(BaseHandler):
|
||||
Returns:
|
||||
Deferred[str]: user_id
|
||||
Raises:
|
||||
RegistrationError if there was a problem registering.
|
||||
SynapseError if there was a problem registering.
|
||||
"""
|
||||
yield self.check_registration_ratelimit(address)
|
||||
|
||||
@@ -174,7 +168,7 @@ class RegistrationHandler(BaseHandler):
|
||||
if password:
|
||||
password_hash = yield self._auth_handler.hash(password)
|
||||
|
||||
if localpart:
|
||||
if localpart is not None:
|
||||
yield self.check_username(localpart, guest_access_token=guest_access_token)
|
||||
|
||||
was_guest = guest_access_token is not None
|
||||
@@ -182,7 +176,7 @@ class RegistrationHandler(BaseHandler):
|
||||
if not was_guest:
|
||||
try:
|
||||
int(localpart)
|
||||
raise RegistrationError(
|
||||
raise SynapseError(
|
||||
400, "Numeric user IDs are reserved for guest users."
|
||||
)
|
||||
except ValueError:
|
||||
|
||||
@@ -29,10 +29,19 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
|
||||
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
|
||||
from synapse.events.utils import copy_power_levels_contents
|
||||
from synapse.http.endpoint import parse_and_validate_server_name
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
|
||||
from synapse.types import (
|
||||
Requester,
|
||||
RoomAlias,
|
||||
RoomID,
|
||||
RoomStreamToken,
|
||||
StateMap,
|
||||
StreamToken,
|
||||
UserID,
|
||||
)
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
@@ -55,18 +64,21 @@ class RoomCreationHandler(BaseHandler):
|
||||
"history_visibility": "shared",
|
||||
"original_invitees_have_ops": False,
|
||||
"guest_can_join": True,
|
||||
"power_level_content_override": {"invite": 0},
|
||||
},
|
||||
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
|
||||
"join_rules": JoinRules.INVITE,
|
||||
"history_visibility": "shared",
|
||||
"original_invitees_have_ops": True,
|
||||
"guest_can_join": True,
|
||||
"power_level_content_override": {"invite": 0},
|
||||
},
|
||||
RoomCreationPreset.PUBLIC_CHAT: {
|
||||
"join_rules": JoinRules.PUBLIC,
|
||||
"history_visibility": "shared",
|
||||
"original_invitees_have_ops": False,
|
||||
"guest_can_join": False,
|
||||
"power_level_content_override": {},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -92,13 +104,15 @@ class RoomCreationHandler(BaseHandler):
|
||||
self.third_party_event_rules = hs.get_third_party_event_rules()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def upgrade_room(self, requester, old_room_id, new_version):
|
||||
def upgrade_room(
|
||||
self, requester: Requester, old_room_id: str, new_version: RoomVersion
|
||||
):
|
||||
"""Replace a room with a new room with a different version
|
||||
|
||||
Args:
|
||||
requester (synapse.types.Requester): the user requesting the upgrade
|
||||
old_room_id (unicode): the id of the room to be replaced
|
||||
new_version (unicode): the new room version to use
|
||||
requester: the user requesting the upgrade
|
||||
old_room_id: the id of the room to be replaced
|
||||
new_version: the new room version to use
|
||||
|
||||
Returns:
|
||||
Deferred[unicode]: the new room id
|
||||
@@ -143,7 +157,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
if r is None:
|
||||
raise NotFoundError("Unknown room id %s" % (old_room_id,))
|
||||
new_room_id = yield self._generate_room_id(
|
||||
creator_id=user_id, is_public=r["is_public"]
|
||||
creator_id=user_id, is_public=r["is_public"], room_version=new_version,
|
||||
)
|
||||
|
||||
logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
|
||||
@@ -167,7 +181,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
},
|
||||
token_id=requester.access_token_id,
|
||||
)
|
||||
old_room_version = yield self.store.get_room_version(old_room_id)
|
||||
old_room_version = yield self.store.get_room_version_id(old_room_id)
|
||||
yield self.auth.check_from_context(
|
||||
old_room_version, tombstone_event, tombstone_context
|
||||
)
|
||||
@@ -207,15 +221,19 @@ class RoomCreationHandler(BaseHandler):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _update_upgraded_room_pls(
|
||||
self, requester, old_room_id, new_room_id, old_room_state,
|
||||
self,
|
||||
requester: Requester,
|
||||
old_room_id: str,
|
||||
new_room_id: str,
|
||||
old_room_state: StateMap[str],
|
||||
):
|
||||
"""Send updated power levels in both rooms after an upgrade
|
||||
|
||||
Args:
|
||||
requester (synapse.types.Requester): the user requesting the upgrade
|
||||
old_room_id (str): the id of the room to be replaced
|
||||
new_room_id (str): the id of the replacement room
|
||||
old_room_state (dict[tuple[str, str], str]): the state map for the old room
|
||||
requester: the user requesting the upgrade
|
||||
old_room_id: the id of the room to be replaced
|
||||
new_room_id: the id of the replacement room
|
||||
old_room_state: the state map for the old room
|
||||
|
||||
Returns:
|
||||
Deferred
|
||||
@@ -244,7 +262,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
for v in ("invite", "events_default"):
|
||||
current = int(pl_content.get(v, 0))
|
||||
if current < restricted_level:
|
||||
logger.info(
|
||||
logger.debug(
|
||||
"Setting level for %s in %s to %i (was %i)",
|
||||
v,
|
||||
old_room_id,
|
||||
@@ -254,7 +272,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
pl_content[v] = restricted_level
|
||||
updated = True
|
||||
else:
|
||||
logger.info("Not setting level for %s (already %i)", v, current)
|
||||
logger.debug("Not setting level for %s (already %i)", v, current)
|
||||
|
||||
if updated:
|
||||
try:
|
||||
@@ -272,7 +290,16 @@ class RoomCreationHandler(BaseHandler):
|
||||
except AuthError as e:
|
||||
logger.warning("Unable to update PLs in old room: %s", e)
|
||||
|
||||
logger.info("Setting correct PLs in new room to %s", old_room_pl_state.content)
|
||||
new_pl_content = copy_power_levels_contents(old_room_pl_state.content)
|
||||
|
||||
# pre-msc2260 rooms may not have the right setting for aliases. If no other
|
||||
# value is set, set it now.
|
||||
events_default = new_pl_content.get("events_default", 0)
|
||||
new_pl_content.setdefault("events", {}).setdefault(
|
||||
EventTypes.Aliases, events_default
|
||||
)
|
||||
|
||||
logger.debug("Setting correct PLs in new room to %s", new_pl_content)
|
||||
yield self.event_creation_handler.create_and_send_nonmember_event(
|
||||
requester,
|
||||
{
|
||||
@@ -280,25 +307,29 @@ class RoomCreationHandler(BaseHandler):
|
||||
"state_key": "",
|
||||
"room_id": new_room_id,
|
||||
"sender": requester.user.to_string(),
|
||||
"content": old_room_pl_state.content,
|
||||
"content": new_pl_content,
|
||||
},
|
||||
ratelimit=False,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def clone_existing_room(
|
||||
self, requester, old_room_id, new_room_id, new_room_version, tombstone_event_id
|
||||
self,
|
||||
requester: Requester,
|
||||
old_room_id: str,
|
||||
new_room_id: str,
|
||||
new_room_version: RoomVersion,
|
||||
tombstone_event_id: str,
|
||||
):
|
||||
"""Populate a new room based on an old room
|
||||
|
||||
Args:
|
||||
requester (synapse.types.Requester): the user requesting the upgrade
|
||||
old_room_id (unicode): the id of the room to be replaced
|
||||
new_room_id (unicode): the id to give the new room (should already have been
|
||||
requester: the user requesting the upgrade
|
||||
old_room_id : the id of the room to be replaced
|
||||
new_room_id: the id to give the new room (should already have been
|
||||
created with _gemerate_room_id())
|
||||
new_room_version (unicode): the new room version to use
|
||||
tombstone_event_id (unicode|str): the ID of the tombstone event in the old
|
||||
room.
|
||||
new_room_version: the new room version to use
|
||||
tombstone_event_id: the ID of the tombstone event in the old room.
|
||||
Returns:
|
||||
Deferred
|
||||
"""
|
||||
@@ -308,7 +339,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
raise SynapseError(403, "You are not permitted to create rooms")
|
||||
|
||||
creation_content = {
|
||||
"room_version": new_room_version,
|
||||
"room_version": new_room_version.identifier,
|
||||
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
|
||||
}
|
||||
|
||||
@@ -332,7 +363,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
(EventTypes.RoomHistoryVisibility, ""),
|
||||
(EventTypes.GuestAccess, ""),
|
||||
(EventTypes.RoomAvatar, ""),
|
||||
(EventTypes.Encryption, ""),
|
||||
(EventTypes.RoomEncryption, ""),
|
||||
(EventTypes.ServerACL, ""),
|
||||
(EventTypes.RelatedGroups, ""),
|
||||
(EventTypes.PowerLevels, ""),
|
||||
@@ -349,6 +380,15 @@ class RoomCreationHandler(BaseHandler):
|
||||
if old_event:
|
||||
initial_state[k] = old_event.content
|
||||
|
||||
# deep-copy the power-levels event before we start modifying it
|
||||
# note that if frozen_dicts are enabled, `power_levels` will be a frozen
|
||||
# dict so we can't just copy.deepcopy it.
|
||||
initial_state[
|
||||
(EventTypes.PowerLevels, "")
|
||||
] = power_levels = copy_power_levels_contents(
|
||||
initial_state[(EventTypes.PowerLevels, "")]
|
||||
)
|
||||
|
||||
# Resolve the minimum power level required to send any state event
|
||||
# We will give the upgrading user this power level temporarily (if necessary) such that
|
||||
# they are able to copy all of the state events over, then revert them back to their
|
||||
@@ -357,8 +397,6 @@ class RoomCreationHandler(BaseHandler):
|
||||
# Copy over user power levels now as this will not be possible with >100PL users once
|
||||
# the room has been created
|
||||
|
||||
power_levels = initial_state[(EventTypes.PowerLevels, "")]
|
||||
|
||||
# Calculate the minimum power level needed to clone the room
|
||||
event_power_levels = power_levels.get("events", {})
|
||||
state_default = power_levels.get("state_default", 0)
|
||||
@@ -368,16 +406,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
# Raise the requester's power level in the new room if necessary
|
||||
current_power_level = power_levels["users"][user_id]
|
||||
if current_power_level < needed_power_level:
|
||||
# make sure we copy the event content rather than overwriting it.
|
||||
# note that if frozen_dicts are enabled, `power_levels` will be a frozen
|
||||
# dict so we can't just copy.deepcopy it.
|
||||
|
||||
new_power_levels = {k: v for k, v in power_levels.items() if k != "users"}
|
||||
new_power_levels["users"] = {
|
||||
k: v for k, v in power_levels.get("users", {}).items() if k != user_id
|
||||
}
|
||||
new_power_levels["users"][user_id] = needed_power_level
|
||||
initial_state[(EventTypes.PowerLevels, "")] = new_power_levels
|
||||
power_levels["users"][user_id] = needed_power_level
|
||||
|
||||
yield self._send_events_for_new_room(
|
||||
requester,
|
||||
@@ -449,9 +478,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
for alias_str in aliases:
|
||||
alias = RoomAlias.from_string(alias_str)
|
||||
try:
|
||||
yield directory_handler.delete_association(
|
||||
requester, alias, send_event=False
|
||||
)
|
||||
yield directory_handler.delete_association(requester, alias)
|
||||
removed_aliases.append(alias_str)
|
||||
except SynapseError as e:
|
||||
logger.warning("Unable to remove alias %s from old room: %s", alias, e)
|
||||
@@ -482,7 +509,6 @@ class RoomCreationHandler(BaseHandler):
|
||||
RoomAlias.from_string(alias),
|
||||
new_room_id,
|
||||
servers=(self.hs.hostname,),
|
||||
send_event=False,
|
||||
check_membership=False,
|
||||
)
|
||||
logger.info("Moved alias %s to new room", alias)
|
||||
@@ -553,9 +579,13 @@ class RoomCreationHandler(BaseHandler):
|
||||
|
||||
# Check whether the third party rules allows/changes the room create
|
||||
# request.
|
||||
yield self.third_party_event_rules.on_create_room(
|
||||
event_allowed = yield self.third_party_event_rules.on_create_room(
|
||||
requester, config, is_requester_admin=is_requester_admin
|
||||
)
|
||||
if not event_allowed:
|
||||
raise SynapseError(
|
||||
403, "You are not permitted to create rooms", Codes.FORBIDDEN
|
||||
)
|
||||
|
||||
if not is_requester_admin and not self.spam_checker.user_may_create_room(
|
||||
user_id
|
||||
@@ -565,14 +595,15 @@ class RoomCreationHandler(BaseHandler):
|
||||
if ratelimit:
|
||||
yield self.ratelimit(requester)
|
||||
|
||||
room_version = config.get(
|
||||
room_version_id = config.get(
|
||||
"room_version", self.config.default_room_version.identifier
|
||||
)
|
||||
|
||||
if not isinstance(room_version, string_types):
|
||||
if not isinstance(room_version_id, string_types):
|
||||
raise SynapseError(400, "room_version must be a string", Codes.BAD_JSON)
|
||||
|
||||
if room_version not in KNOWN_ROOM_VERSIONS:
|
||||
room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
|
||||
if room_version is None:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"Your homeserver does not support this room version",
|
||||
@@ -619,7 +650,9 @@ class RoomCreationHandler(BaseHandler):
|
||||
visibility = config.get("visibility", None)
|
||||
is_public = visibility == "public"
|
||||
|
||||
room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
|
||||
room_id = yield self._generate_room_id(
|
||||
creator_id=user_id, is_public=is_public, room_version=room_version,
|
||||
)
|
||||
|
||||
directory_handler = self.hs.get_handlers().directory_handler
|
||||
if room_alias:
|
||||
@@ -628,7 +661,6 @@ class RoomCreationHandler(BaseHandler):
|
||||
room_id=room_id,
|
||||
room_alias=room_alias,
|
||||
servers=[self.hs.hostname],
|
||||
send_event=False,
|
||||
check_membership=False,
|
||||
)
|
||||
|
||||
@@ -648,7 +680,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
creation_content = config.get("creation_content", {})
|
||||
|
||||
# override any attempt to set room versions via the creation_content
|
||||
creation_content["room_version"] = room_version
|
||||
creation_content["room_version"] = room_version.identifier
|
||||
|
||||
yield self._send_events_for_new_room(
|
||||
requester,
|
||||
@@ -753,7 +785,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
@defer.inlineCallbacks
|
||||
def send(etype, content, **kwargs):
|
||||
event = create(etype, content, **kwargs)
|
||||
logger.info("Sending %s in new room", etype)
|
||||
logger.debug("Sending %s in new room", etype)
|
||||
yield self.event_creation_handler.create_and_send_nonmember_event(
|
||||
creator, event, ratelimit=False
|
||||
)
|
||||
@@ -767,7 +799,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
creation_content.update({"creator": creator_id})
|
||||
yield send(etype=EventTypes.Create, content=creation_content)
|
||||
|
||||
logger.info("Sending %s in new room", EventTypes.Member)
|
||||
logger.debug("Sending %s in new room", EventTypes.Member)
|
||||
yield self.room_member_handler.update_membership(
|
||||
creator,
|
||||
creator.user,
|
||||
@@ -792,19 +824,28 @@ class RoomCreationHandler(BaseHandler):
|
||||
EventTypes.RoomHistoryVisibility: 100,
|
||||
EventTypes.CanonicalAlias: 50,
|
||||
EventTypes.RoomAvatar: 50,
|
||||
# MSC2260: Allow everybody to send alias events by default
|
||||
# This will be reudundant on pre-MSC2260 rooms, since the
|
||||
# aliases event is special-cased.
|
||||
EventTypes.Aliases: 0,
|
||||
EventTypes.Tombstone: 100,
|
||||
EventTypes.ServerACL: 100,
|
||||
},
|
||||
"events_default": 0,
|
||||
"state_default": 50,
|
||||
"ban": 50,
|
||||
"kick": 50,
|
||||
"redact": 50,
|
||||
"invite": 0,
|
||||
"invite": 50,
|
||||
}
|
||||
|
||||
if config["original_invitees_have_ops"]:
|
||||
for invitee in invite_list:
|
||||
power_level_content["users"][invitee] = 100
|
||||
|
||||
# Power levels overrides are defined per chat preset
|
||||
power_level_content.update(config["power_level_content_override"])
|
||||
|
||||
if power_level_content_override:
|
||||
power_level_content.update(power_level_content_override)
|
||||
|
||||
@@ -837,7 +878,9 @@ class RoomCreationHandler(BaseHandler):
|
||||
yield send(etype=etype, state_key=state_key, content=content)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _generate_room_id(self, creator_id, is_public):
|
||||
def _generate_room_id(
|
||||
self, creator_id: str, is_public: str, room_version: RoomVersion,
|
||||
):
|
||||
# autogen room IDs and try to create it. We may clash, so just
|
||||
# try a few times till one goes through, giving up eventually.
|
||||
attempts = 0
|
||||
@@ -851,6 +894,7 @@ class RoomCreationHandler(BaseHandler):
|
||||
room_id=gen_room_id,
|
||||
room_creator_user_id=creator_id,
|
||||
is_public=is_public,
|
||||
room_version=room_version,
|
||||
)
|
||||
return gen_room_id
|
||||
except StoreError:
|
||||
|
||||
@@ -690,7 +690,7 @@ class RoomMemberHandler(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_inviter(self, user_id, room_id):
|
||||
invite = yield self.store.get_invite_for_user_in_room(
|
||||
invite = yield self.store.get_invite_for_local_user_in_room(
|
||||
user_id=user_id, room_id=room_id
|
||||
)
|
||||
if invite:
|
||||
@@ -944,8 +944,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
# join dance for now, since we're kinda implicitly checking
|
||||
# that we are allowed to join when we decide whether or not we
|
||||
# need to do the invite/join dance.
|
||||
yield self.federation_handler.do_invite_join(
|
||||
remote_room_hosts, room_id, user.to_string(), content
|
||||
yield defer.ensureDeferred(
|
||||
self.federation_handler.do_invite_join(
|
||||
remote_room_hosts, room_id, user.to_string(), content
|
||||
)
|
||||
)
|
||||
yield self._user_joined_room(user, room_id)
|
||||
|
||||
@@ -982,8 +984,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
"""
|
||||
fed_handler = self.federation_handler
|
||||
try:
|
||||
ret = yield fed_handler.do_remotely_reject_invite(
|
||||
remote_room_hosts, room_id, target.to_string(), content=content,
|
||||
ret = yield defer.ensureDeferred(
|
||||
fed_handler.do_remotely_reject_invite(
|
||||
remote_room_hosts, room_id, target.to_string(), content=content,
|
||||
)
|
||||
)
|
||||
return ret
|
||||
except Exception as e:
|
||||
|
||||
@@ -24,6 +24,7 @@ from saml2.client import Saml2Client
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.config import ConfigError
|
||||
from synapse.http.servlet import parse_string
|
||||
from synapse.module_api import ModuleApi
|
||||
from synapse.rest.client.v1.login import SSOAuthHandler
|
||||
from synapse.types import (
|
||||
UserID,
|
||||
@@ -31,6 +32,7 @@ from synapse.types import (
|
||||
mxid_localpart_allowed_characters,
|
||||
)
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.iterutils import chunk_seq
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -59,7 +61,8 @@ class SamlHandler:
|
||||
|
||||
# plugin to do custom mapping from saml response to mxid
|
||||
self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
|
||||
hs.config.saml2_user_mapping_provider_config
|
||||
hs.config.saml2_user_mapping_provider_config,
|
||||
ModuleApi(hs, hs.get_auth_handler()),
|
||||
)
|
||||
|
||||
# identifier for the external_ids table
|
||||
@@ -112,10 +115,10 @@ class SamlHandler:
|
||||
# the dict.
|
||||
self.expire_sessions()
|
||||
|
||||
user_id = await self._map_saml_response_to_user(resp_bytes)
|
||||
user_id = await self._map_saml_response_to_user(resp_bytes, relay_state)
|
||||
self._sso_auth_handler.complete_sso_login(user_id, request, relay_state)
|
||||
|
||||
async def _map_saml_response_to_user(self, resp_bytes):
|
||||
async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url):
|
||||
try:
|
||||
saml2_auth = self._saml_client.parse_authn_request_response(
|
||||
resp_bytes,
|
||||
@@ -130,17 +133,28 @@ class SamlHandler:
|
||||
logger.warning("SAML2 response was not signed")
|
||||
raise SynapseError(400, "SAML2 response was not signed")
|
||||
|
||||
logger.info("SAML2 response: %s", saml2_auth.origxml)
|
||||
logger.debug("SAML2 response: %s", saml2_auth.origxml)
|
||||
for assertion in saml2_auth.assertions:
|
||||
# kibana limits the length of a log field, whereas this is all rather
|
||||
# useful, so split it up.
|
||||
count = 0
|
||||
for part in chunk_seq(str(assertion), 10000):
|
||||
logger.info(
|
||||
"SAML2 assertion: %s%s", "(%i)..." % (count,) if count else "", part
|
||||
)
|
||||
count += 1
|
||||
|
||||
logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
|
||||
|
||||
try:
|
||||
remote_user_id = saml2_auth.ava["uid"][0]
|
||||
except KeyError:
|
||||
logger.warning("SAML2 response lacks a 'uid' attestation")
|
||||
raise SynapseError(400, "'uid' not in SAML2 response")
|
||||
|
||||
self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None)
|
||||
|
||||
remote_user_id = self._user_mapping_provider.get_remote_user_id(
|
||||
saml2_auth, client_redirect_url
|
||||
)
|
||||
|
||||
if not remote_user_id:
|
||||
raise Exception("Failed to extract remote user id from SAML response")
|
||||
|
||||
with (await self._mapping_lock.queue(self._auth_provider_id)):
|
||||
# first of all, check if we already have a mapping for this user
|
||||
logger.info(
|
||||
@@ -183,7 +197,7 @@ class SamlHandler:
|
||||
# Map saml response to user attributes using the configured mapping provider
|
||||
for i in range(1000):
|
||||
attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes(
|
||||
saml2_auth, i
|
||||
saml2_auth, i, client_redirect_url=client_redirect_url,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
@@ -216,6 +230,8 @@ class SamlHandler:
|
||||
500, "Unable to generate a Matrix ID from the SAML response"
|
||||
)
|
||||
|
||||
logger.info("Mapped SAML user to local part %s", localpart)
|
||||
|
||||
registered_user_id = await self._registration_handler.register_user(
|
||||
localpart=localpart, default_display_name=displayname
|
||||
)
|
||||
@@ -265,17 +281,35 @@ class SamlConfig(object):
|
||||
class DefaultSamlMappingProvider(object):
|
||||
__version__ = "0.0.1"
|
||||
|
||||
def __init__(self, parsed_config: SamlConfig):
|
||||
def __init__(self, parsed_config: SamlConfig, module_api: ModuleApi):
|
||||
"""The default SAML user mapping provider
|
||||
|
||||
Args:
|
||||
parsed_config: Module configuration
|
||||
module_api: module api proxy
|
||||
"""
|
||||
self._mxid_source_attribute = parsed_config.mxid_source_attribute
|
||||
self._mxid_mapper = parsed_config.mxid_mapper
|
||||
|
||||
self._grandfathered_mxid_source_attribute = (
|
||||
module_api._hs.config.saml2_grandfathered_mxid_source_attribute
|
||||
)
|
||||
|
||||
def get_remote_user_id(
|
||||
self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str
|
||||
):
|
||||
"""Extracts the remote user id from the SAML response"""
|
||||
try:
|
||||
return saml_response.ava["uid"][0]
|
||||
except KeyError:
|
||||
logger.warning("SAML2 response lacks a 'uid' attestation")
|
||||
raise SynapseError(400, "'uid' not in SAML2 response")
|
||||
|
||||
def saml_response_to_user_attributes(
|
||||
self, saml_response: saml2.response.AuthnResponse, failures: int = 0,
|
||||
self,
|
||||
saml_response: saml2.response.AuthnResponse,
|
||||
failures: int,
|
||||
client_redirect_url: str,
|
||||
) -> dict:
|
||||
"""Maps some text from a SAML response to attributes of a new user
|
||||
|
||||
@@ -285,6 +319,8 @@ class DefaultSamlMappingProvider(object):
|
||||
failures: How many times a call to this function with this
|
||||
saml_response has resulted in a failure
|
||||
|
||||
client_redirect_url: where the client wants to redirect to
|
||||
|
||||
Returns:
|
||||
dict: A dict containing new user attributes. Possible keys:
|
||||
* mxid_localpart (str): Required. The localpart of the user's mxid
|
||||
|
||||
@@ -179,7 +179,7 @@ class SearchHandler(BaseHandler):
|
||||
search_filter = Filter(filter_dict)
|
||||
|
||||
# TODO: Search through left rooms too
|
||||
rooms = yield self.store.get_rooms_for_user_where_membership_is(
|
||||
rooms = yield self.store.get_rooms_for_local_user_where_membership_is(
|
||||
user.to_string(),
|
||||
membership_list=[Membership.JOIN],
|
||||
# membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
|
||||
|
||||
@@ -286,7 +286,7 @@ class StatsHandler(StateDeltasHandler):
|
||||
room_state["history_visibility"] = event_content.get(
|
||||
"history_visibility"
|
||||
)
|
||||
elif typ == EventTypes.Encryption:
|
||||
elif typ == EventTypes.RoomEncryption:
|
||||
room_state["encryption"] = event_content.get("algorithm")
|
||||
elif typ == EventTypes.Name:
|
||||
room_state["name"] = event_content.get("name")
|
||||
@@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
|
||||
room_state["guest_access"] = event_content.get("guest_access")
|
||||
|
||||
for room_id, state in room_to_state_updates.items():
|
||||
logger.info("Updating room_stats_state for %s: %s", room_id, state)
|
||||
logger.debug("Updating room_stats_state for %s: %s", room_id, state)
|
||||
yield self.store.update_room_state(room_id, state)
|
||||
|
||||
return room_to_stats_deltas, user_to_stats_deltas
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user