Compare commits
576 commits — travis/nul...dinsic
(Commit list abridged: 84e695f506 … 3f79378d4b)
@@ -36,8 +36,6 @@ steps:
          image: "python:3.6"
          propagate-environment: true

  - wait

  - command:
      - "python -m pip install tox"
      - "tox -e check-sampleconfig"

@@ -46,6 +44,8 @@ steps:
      - docker#v3.0.1:
          image: "python:3.6"

  - wait

  - command:
      - "python -m pip install tox"
      - "tox -e py27,codecov"

@@ -56,6 +56,12 @@ steps:
      - docker#v3.0.1:
          image: "python:2.7"
          propagate-environment: true
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - command:
      - "python -m pip install tox"

@@ -67,6 +73,12 @@ steps:
      - docker#v3.0.1:
          image: "python:3.5"
          propagate-environment: true
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - command:
      - "python -m pip install tox"

@@ -78,6 +90,12 @@ steps:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - command:
      - "python -m pip install tox"

@@ -89,6 +107,12 @@ steps:
      - docker#v3.0.1:
          image: "python:3.7"
          propagate-environment: true
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - command:
      - "python -m pip install tox"

@@ -100,6 +124,12 @@ steps:
      - docker#v3.0.1:
          image: "python:2.7"
          propagate-environment: true
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 2.7 / :postgres: 9.4"
    env:

@@ -111,6 +141,12 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py27.pg94.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 2.7 / :postgres: 9.5"
    env:

@@ -122,6 +158,12 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py27.pg95.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 3.5 / :postgres: 9.4"
    env:

@@ -133,6 +175,12 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py35.pg94.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 3.5 / :postgres: 9.5"
    env:

@@ -144,6 +192,12 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py35.pg95.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 3.7 / :postgres: 9.5"
    env:

@@ -155,6 +209,12 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py37.pg95.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2

  - label: ":python: 3.7 / :postgres: 11"
    env:

@@ -166,3 +226,9 @@ steps:
      run: testenv
      config:
        - .buildkite/docker-compose.py37.pg11.yaml
    retry:
      automatic:
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2
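The recurring stanza in the hunks above is Buildkite's automatic-retry configuration. As a rough sketch of how one complete test step looks once the stanza is in place (the label, tox environment, and key ordering here are illustrative, not taken from the pipeline):

```yaml
steps:
  - label: ":python: 3.6"              # illustrative label
    command:
      - "python -m pip install tox"
      - "tox -e py36"                  # illustrative tox environment
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true
    retry:
      automatic:
        # Retry at most twice when the agent is lost (exit status -1)
        # or the job exits with status 2.
        - exit_status: -1
          limit: 2
        - exit_status: 2
          limit: 2
```

This matches the changelog entry further down about automatically retrying Buildkite builds (max twice) when an agent is lost.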
@@ -1,116 +1,8 @@
version: 2
jobs:
  dockerhubuploadrelease:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py2
      - run: docker push matrixdotorg/synapse:latest-py3
  sytestpy2:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs

  sytestpy3:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
      - image: matrixdotorg/sytest-synapse:dinsic
    working_directory: /src
    steps:
      - checkout

@@ -126,45 +18,7 @@ workflows:
  version: 2
  build:
    jobs:
      - sytestpy2:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /v[0-9].[0-9]+.[0-9]+.*/
            branches:
              ignore: /.*/
      - dockerhubuploadlatest:
          filters:
            branches:
              only: master
              ignore: /develop|master|release-.*/
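The workflow filters follow one pattern throughout: the plain jobs run only on mainline branches, the `merged` variants run on every other branch, and the Docker Hub uploads are gated on tags or `master`. A condensed sketch of the gating styles, reusing the job names from the config above:

```yaml
workflows:
  version: 2
  build:
    jobs:
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/   # mainline branches only
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/ # every other branch
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /v[0-9].[0-9]+.[0-9]+.*/      # version tags such as v1.0.0
            branches:
              ignore: /.*/                        # never plain branch pushes
```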
@@ -9,11 +9,12 @@ source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is! Assuming merge target is develop."
    echo "Can't figure out what the PR number is! Assuming merge target is dinsic."

    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
    # can probably assume it's based on it and will be merged into it.
    GITBASE="develop"
    # It probably hasn't had a PR opened yet. Since all PRs for dinsic land on
    # dinsic, we can probably assume it's based on it and will be merged into
    # it.
    GITBASE="dinsic"
else
    # Get the reference, using the GitHub API
    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
CHANGES.md — 133 changed lines

@@ -1,8 +1,137 @@
Synapse 1.0.0 (2019-06-11)
==========================

Bugfixes
--------

- Fix bug where attempting to send transactions with a large number of EDUs can fail. ([\#5418](https://github.com/matrix-org/synapse/issues/5418))


Improved Documentation
----------------------

- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419))


Internal Changes
----------------

- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424))


Synapse 1.0.0rc3 (2019-06-10)
=============================

Security: Fix authentication bug introduced in 1.0.0rc1. Please upgrade to rc3 immediately.


Synapse 1.0.0rc2 (2019-06-10)
=============================

Bugfixes
--------

- Remove redundant warning about key server response validation. ([\#5392](https://github.com/matrix-org/synapse/issues/5392))
- Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. ([\#5415](https://github.com/matrix-org/synapse/issues/5415))
- Fix excessive memory usage with the default `federation_verify_certificates: true` configuration. ([\#5417](https://github.com/matrix-org/synapse/issues/5417))


Synapse 1.0.0rc1 (2019-06-07)
=============================

Features
--------

- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324))
- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220))
- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249))
- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276))
- CAS login will now hit the r0 API, not the deprecated v1 one. ([\#5286](https://github.com/matrix-org/synapse/issues/5286))
- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359))
- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360))
- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361))
- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371))
- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374))
- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377))
- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379))


Bugfixes
--------

- Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a room's name or its canonical alias is empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089))
- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156))
- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200))
- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221))
- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233))
- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251))
- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256))
- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257))
- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258))
- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268))
- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275))
- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277))
- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293))
- Fix performance problems with the rooms stats background update. ([\#5294](https://github.com/matrix-org/synapse/issues/5294))
- Fix noisy 'no key for server' logs. ([\#5300](https://github.com/matrix-org/synapse/issues/5300))
- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307))
- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309))
- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317))
- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321))
- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333))
- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334))
- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340))
- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341))
- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342))
- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354))
- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352))
- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355))
- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362))
- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369))


Improved Documentation
----------------------

- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282))
- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357))


Internal Changes
----------------

- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216))
- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328))
- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227))
- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230))
- Preparatory work for key-validity features. ([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356))
- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283))
- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284))
- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287))
- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288))
- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303))
- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320))
- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332))
- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344))
- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353))
- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370))
- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380))


Synapse 0.99.5.2 (2019-05-30)
=============================

Bugfixes
--------

- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))


Synapse 0.99.5.1 (2019-05-22)
=============================

No significant changes.

0.99.5.1 supersedes 0.99.5 due to malformed debian changelog - no functional changes.

Synapse 0.99.5 (2019-05-22)
===========================
INSTALL.md — 39 changed lines

@@ -1,13 +1,14 @@
* [Installing Synapse](#installing-synapse)
  * [Installing from source](#installing-from-source)
    * [Platform-Specific Instructions](#platform-specific-instructions)
    * [Troubleshooting Installation](#troubleshooting-installation)
  * [Prebuilt packages](#prebuilt-packages)
* [Setting up Synapse](#setting-up-synapse)
  * [TLS certificates](#tls-certificates)
  * [Registering a user](#registering-a-user)
  * [Setting up a TURN server](#setting-up-a-turn-server)
  * [URL previews](#url-previews)
- [Installing Synapse](#installing-synapse)
  - [Installing from source](#installing-from-source)
    - [Platform-Specific Instructions](#platform-specific-instructions)
    - [Troubleshooting Installation](#troubleshooting-installation)
  - [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
  - [TLS certificates](#tls-certificates)
  - [Email](#email)
  - [Registering a user](#registering-a-user)
  - [Setting up a TURN server](#setting-up-a-turn-server)
  - [URL previews](#url-previews)

# Installing Synapse

@@ -394,8 +395,22 @@ To configure Synapse to expose an HTTPS port, you will need to edit
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).

For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md)


## Email

It is desirable for Synapse to have the capability to send email. For example,
this is required to support the 'password reset' feature.

To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.

If Synapse is not configured with an SMTP server, password reset via email will
be disabled by default.

## Registering a user
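A minimal sketch of the ``email`` section described above; the host, port, addresses, and credentials are placeholders rather than defaults:

```yaml
email:
  smtp_host: mail.example.com          # placeholder SMTP relay
  smtp_port: 587                       # placeholder port
  notif_from: "Your server <noreply@example.com>"
  # Only needed if your relay requires authentication / TLS:
  smtp_user: synapse                   # placeholder credential
  smtp_pass: "secret"                  # placeholder credential
  require_transport_security: true
```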
MANIFEST.in — 12 changed lines

@@ -9,14 +9,19 @@ include demo/*.py
include demo/*.sh

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres
recursive-include synapse/storage/schema *.sql.sqlite
recursive-include synapse/storage/schema *.py
recursive-include synapse/storage/schema *.txt

recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.pem
recursive-include tests *.py
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key

recursive-include synapse/res *
recursive-include synapse/static *.css

@@ -43,3 +48,8 @@ prune .buildkite

exclude jenkins*
recursive-exclude jenkins *.sh

# FIXME: we shouldn't have these templates here
recursive-include res/templates-dinsic *.css
recursive-include res/templates-dinsic *.html
recursive-include res/templates-dinsic *.txt
UPGRADE.rst — 49 changed lines

@@ -49,6 +49,55 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

Upgrading to v1.0
=================

Validation of TLS certificates
------------------------------

Synapse v1.0 is the first release to enforce
validation of TLS certificates for the federation API. It is therefore
essential that your certificates are correctly configured. See the `FAQ
<docs/MSC1711_certificates_FAQ.md>`_ for more information.

Note that v1.0 installations will also no longer be able to federate with
servers that have not correctly configured their certificates.

In rare cases, it may be desirable to disable certificate checking: for
example, it might be essential to be able to federate with a given legacy
server in a closed federation. This can be done in one of two ways:

* Configure the global switch ``federation_verify_certificates`` to ``false``.
* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.

See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.

Email
-----

When a user requests a password reset, Synapse will send an email to the
user to confirm the request.

Previous versions of Synapse delegated the job of sending this email to an
identity server. If the identity server was somehow malicious or became
compromised, it would be theoretically possible to hijack an account through
this means.

Therefore, by default, Synapse v1.0 will send the confirmation email itself. If
Synapse is not configured with an SMTP server, password reset via email will be
disabled.

To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.

If you are absolutely certain that you wish to continue using an identity
server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``.

See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.

Upgrading to v0.99.0
====================
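For the rare closed-federation case described above, a sketch of the two ways to relax certificate checking (the server name is a placeholder):

```yaml
# Option 1: turn off federation certificate verification entirely
# (not recommended outside a closed federation).
federation_verify_certificates: false

# Option 2: keep verification on, but trust specific legacy servers.
federation_certificate_verification_whitelist:
  - legacy.example.com                 # placeholder server name
```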
@@ -1 +0,0 @@
Synapse now more efficiently collates room statistics.

changelog.d/5083.feature (new file)
@@ -0,0 +1 @@
Adds auth_profile_reqs option to require access_token to GET /profile endpoints on CS API.

changelog.d/5098.misc (new file)
@@ -0,0 +1 @@
Add workarounds for pep-517 install errors.

@@ -1 +0,0 @@
Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo.

changelog.d/5214.feature (new file)
@@ -0,0 +1 @@
Allow server admins to define and enforce a password policy (MSC2000).

@@ -1 +0,0 @@
Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2.

@@ -1 +0,0 @@
Run black on synapse.crypto.keyring.

changelog.d/5363.feature (new file)
@@ -0,0 +1 @@
Allow expired user to trigger renewal email sending manually.

changelog.d/5378.misc (new file)
@@ -0,0 +1 @@
Track deactivated accounts in the database.

changelog.d/5394.bugfix (new file)
@@ -0,0 +1 @@
Fix a bug where deactivated users could receive renewal emails if the account validity feature is on.

changelog.d/5416.misc (new file)
@@ -0,0 +1 @@
Add unique index to the profile_replication_status table.

changelog.d/5420.feature (new file)
@@ -0,0 +1 @@
Add configuration option to hide new users from the user directory.

changelog.d/5440.feature (new file)
@@ -0,0 +1 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

changelog.d/5464.bugfix (new file)
@@ -0,0 +1 @@
Fix missing invite state after exchanging 3PID invites over federation.

changelog.d/5465.misc (new file)
@@ -0,0 +1,2 @@
Track deactivated accounts in the database.

changelog.d/5474.feature (new file)
@@ -0,0 +1 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

changelog.d/5477.feature (new file)
@@ -0,0 +1 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

changelog.d/5493.misc (new file)
@@ -0,0 +1 @@
Track deactivated accounts in the database.

changelog.d/5534.feature (new file)
@@ -0,0 +1 @@
Split public rooms directory auth config in two settings, in order to manage client auth independently from the federation part of it. Obsoletes the "restrict_public_rooms_to_local_users" configuration setting. If "restrict_public_rooms_to_local_users" is set in the config, Synapse will act as if both new options are enabled, i.e. require authentication through the client API and deny federation requests.

changelog.d/5576.bugfix (new file)
@@ -0,0 +1 @@
Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited.

changelog.d/5610.feature (new file)
@@ -0,0 +1 @@
Implement new custom event rules for power levels.

changelog.d/5644.bugfix (new file)
@@ -0,0 +1 @@
Fix newly-registered users not being able to lookup their own profile without joining a room.

changelog.d/5702.bugfix (new file)
@@ -0,0 +1 @@
Fix 3PID invite to invite association detection in the Tchap room access rules.

changelog.d/5760.feature (new file)
@@ -0,0 +1 @@
Force the access rule to be "restricted" if the join rule is "public".

changelog.d/5780.misc (new file)
@@ -0,0 +1 @@
Allow looping calls to be given arguments.

changelog.d/5807.feature (new file)
@@ -0,0 +1 @@
Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature.

changelog.d/5815.feature (new file)
@@ -0,0 +1 @@
Implement per-room message retention policies.
debian/changelog — 12 changed lines (vendored)

@@ -1,3 +1,15 @@
matrix-synapse-py3 (1.0.0) stable; urgency=medium

  * New synapse release 1.0.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2019 17:09:53 +0100

matrix-synapse-py3 (0.99.5.2) stable; urgency=medium

  * New synapse release 0.99.5.2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 30 May 2019 16:28:07 +0100

matrix-synapse-py3 (0.99.5.1) stable; urgency=medium

  * New synapse release 0.99.5.1.
@@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual editing.

An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker) section of the synapse project for
[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for
examples.

### Without Compose (harder)
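A minimal docker-compose sketch for this image; the tag, port mapping, and volume path are illustrative, and the maintained examples live in the contrib/docker directory linked above:

```yaml
version: "3"
services:
  synapse:
    image: matrixdotorg/synapse:latest
    restart: unless-stopped
    volumes:
      - ./synapse-data:/data           # illustrative host path for config/media
    ports:
      - "8008:8008"                    # illustrative client-API port mapping
```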
@@ -7,6 +7,7 @@ Requires a public/private key pair from:

https://developers.google.com/recaptcha/

Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option

Setting ReCaptcha Keys
----------------------
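Once a key pair has been issued, the corresponding homeserver settings look roughly like this (the option names are Synapse's standard reCAPTCHA settings; the values are placeholders):

```yaml
enable_registration_captcha: true
recaptcha_public_key: "YOUR_SITE_KEY"       # placeholder
recaptcha_private_key: "YOUR_SECRET_KEY"    # placeholder
```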
@@ -1,5 +1,22 @@
# MSC1711 Certificates FAQ

## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
certificate checks.

Much of what follows is now outdated since most admins will have already
upgraded; however, it may be of use to those with old installs returning to the
project.

If you are setting up a server from scratch you almost certainly should look at
the [installation guide](INSTALL.md) instead.

## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well

@@ -68,16 +85,14 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a
.well-known entry (see below), should retain their SRV record and use it
alongside their .well-known record.

**>= 5th March 2019 - Synapse 1.0.0 is released**
**10th June 2019 - Synapse 1.0.0 is released**

1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one
month after 5th February to upgrade to 0.99.0 and deploy their certificates. In
1.0.0 is scheduled for release on 10th June. In
accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
1.0.0 will enforce certificate validity. This means that any homeserver without a
valid certificate after this point will no longer be able to federate with
1.0.0 servers.


## Configuring certificates for compatibility with Synapse 1.0.0

### If you do not currently have an SRV record

@@ -145,12 +160,11 @@ You can do this with a `.well-known` file as follows:
1. Keep the SRV record in place - it is needed for backwards compatibility
   with Synapse 0.34 and earlier.

2. Give synapse a certificate corresponding to the target domain
   (`customer.example.net` in the above example). Currently Synapse's ACME
   support [does not support
   this](https://github.com/matrix-org/synapse/issues/4552), so you will have
   to acquire a certificate yourself and give it to Synapse via
   `tls_certificate_path` and `tls_private_key_path`.
2. Give Synapse a certificate corresponding to the target domain
   (`customer.example.net` in the above example). You can either use Synapse's
   built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
   the `acme` section), or acquire a certificate yourself and give it to
   Synapse via `tls_certificate_path` and `tls_private_key_path`.

3. Restart Synapse to ensure the new certificate is loaded.
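A sketch of the two approaches in step 2, reusing the example's `customer.example.net` (the file paths are illustrative):

```yaml
# Either point Synapse at a certificate you obtained yourself...
tls_certificate_path: "/etc/synapse/customer.example.net.crt"   # illustrative path
tls_private_key_path: "/etc/synapse/customer.example.net.key"   # illustrative path

# ...or let the built-in ACME support fetch one for the target domain.
acme:
  enabled: true
  domain: customer.example.net
```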
@@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility.

Reset password
==============

Changes the password of another user.
Changes the password of another user. This will automatically log the user out of all their devices.

The api is::
@@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.

Once you have completed the steps necessary to federate, you should be able to
join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
room for Synapse admins.)
Once federation has been configured, you should be able to join a room over
federation. A good place to start is ``#synapse:matrix.org`` - a room for
Synapse admins.


## Delegation

@@ -98,6 +98,77 @@ _matrix._tcp.<server_name>``. In our example, we would expect this:
Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.

### Delegation FAQ

#### When do I need a SRV record or .well-known URI?

If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.

This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.

**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.

#### I have created a .well-known URI. Do I still need an SRV record?

As of Synapse 0.99, Synapse will first check for the existence of a .well-known
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.

That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand .well-known URIs, so if you removed your SRV record
you would no longer be able to federate with them.

It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.

#### Can I manage my own certificates rather than having Synapse renew certificates itself?

Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation endpoints.

#### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
reverse proxy.

#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

Practically speaking, this is no longer necessary.

If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.

#### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.

## Troubleshooting

You can use the [federation tester](
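As a sketch of the reverse-proxy arrangement the FAQ describes, assuming TLS terminates at the proxy (the listener layout is illustrative):

```yaml
# TLS is handled by the reverse proxy, so Synapse serves plain HTTP locally.
no_tls: True

listeners:
  - port: 8008                # illustrative local port behind the proxy
    type: http
    tls: false
    x_forwarded: true         # trust X-Forwarded-For headers from the proxy
    resources:
      - names: [client, federation]
```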
@@ -77,11 +77,25 @@ pid_file: DATADIR/homeserver.pid
|
||||
#
|
||||
#require_auth_for_profile_requests: true
|
||||
|
||||
# If set to 'true', requires authentication to access the server's
|
||||
# public rooms directory through the client API, and forbids any other
|
||||
# homeserver to fetch it via federation. Defaults to 'false'.
|
||||
# If set to 'false', requires authentication to access the server's public rooms
|
||||
# directory through the client API. Defaults to 'true'.
|
||||
#
|
||||
#restrict_public_rooms_to_local_users: true
|
||||
#allow_public_rooms_without_auth: false
|
||||
|
||||
# If set to 'false', forbids any other homeserver to fetch the server's public
|
||||
# rooms directory via federation. Defaults to 'true'.
|
||||
#
|
||||
#allow_public_rooms_over_federation: false
|
||||
|
||||
# The default room version for newly created rooms.
|
||||
#
|
||||
# Known room versions are listed here:
|
||||
# https://matrix.org/docs/spec/#complete-list-of-room-versions
|
||||
#
|
||||
# For example, for room version 1, default_room_version should be set
|
||||
# to "1".
|
||||
#
|
||||
#default_room_version: "4"
|
||||
|
||||
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
|
||||
#
|
||||
@@ -251,6 +265,22 @@ listeners:

# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2
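The interaction between these three settings is easier to see in code. The
following is a minimal, hypothetical sketch of the blocking behaviour, not
Synapse's actual implementation (which tracks activity in the database);
`active_since` and `check_mau` are illustrative names only.

```
from datetime import datetime, timedelta

class ResourceLimitError(Exception):
    errcode = "M_RESOURCE_LIMIT_EXCEEDED"

def check_mau(active_since, limit_usage_by_mau, max_mau_value, mau_trial_days):
    """Raise if a new user action should be blocked. `active_since` maps
    user_id -> datetime of first activity (hypothetical bookkeeping)."""
    if not limit_usage_by_mau:
        return
    cutoff = datetime.utcnow() - timedelta(days=mau_trial_days)
    # Users still inside their trial window don't count towards the cap.
    mau_count = sum(
        1 for first_seen in active_since.values() if first_seen <= cutoff
    )
    if mau_count >= max_mau_value:
        raise ResourceLimitError("Monthly Active User limit exceeded")
```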
@@ -282,6 +312,74 @@ listeners:
#
#allow_per_room_profiles: false

# Whether to show the users on this homeserver in the user directory. Defaults to
# 'true'.
#
#show_users_in_user_directory: false

# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
  # The message retention policies feature is disabled by default. Uncomment the
  # following line to enable it.
  #
  #enabled: true

  # Default retention policy. If set, Synapse will apply it to rooms that lack the
  # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
  # matter much because Synapse doesn't take it into account yet.
  #
  #default_policy:
  #  min_lifetime: 1d
  #  max_lifetime: 1y

  # Retention policy limits. If set, a user won't be able to send a
  # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
  # that's not within this range. This is especially useful in closed federations,
  # in which server admins can make sure every federating server applies the same
  # rules.
  #
  #allowed_lifetime_min: 1d
  #allowed_lifetime_max: 1y

  # Server admins can define the settings of the background jobs purging the
  # events whose lifetime has expired under the 'purge_jobs' section.
  #
  # If no configuration is provided, a single job will be set up to delete expired
  # events in every room daily.
  #
  # Each job's configuration defines which range of message lifetimes the job
  # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
  # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
  # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
  # lower than or equal to 3 days. Both the minimum and the maximum value of a
  # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
  # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
  # whose 'max_lifetime' is lower than or equal to three days.
  #
  # The rationale for this per-job configuration is that some rooms might have a
  # retention policy with a low 'max_lifetime', where history needs to be purged
  # of outdated messages on a very frequent basis (e.g. every 5min), but the admin
  # might not want that purge to be performed by a job that's iterating over every
  # room it knows, which would be quite heavy on the server.
  #
  #purge_jobs:
  #  - shortest_max_lifetime: 1d
  #    longest_max_lifetime: 3d
  #    interval: 5m
  #  - shortest_max_lifetime: 3d
  #    longest_max_lifetime: 1y
  #    interval: 24h
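To make the per-job ranges concrete, here is a small, hypothetical sketch of how
a room's 'max_lifetime' would be matched against the jobs configured above; the
helper name and data layout are illustrative, not Synapse internals.

```
DAY_MS = 24 * 60 * 60 * 1000

purge_jobs = [
    # (shortest_max_lifetime, longest_max_lifetime, interval)
    (1 * DAY_MS, 3 * DAY_MS, "5m"),
    (3 * DAY_MS, 365 * DAY_MS, "24h"),
]

def job_for_room(max_lifetime_ms):
    """Return the purge interval of the job whose (shortest, longest]
    range covers this room's retention policy."""
    for shortest, longest, interval in purge_jobs:
        lower_ok = shortest is None or max_lifetime_ms > shortest
        upper_ok = longest is None or max_lifetime_ms <= longest
        if lower_ok and upper_ok:
            return interval
    return None

assert job_for_room(2 * DAY_MS) == "5m"    # 1d < 2d <= 3d
assert job_for_room(30 * DAY_MS) == "24h"  # 3d < 30d <= 1y
```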

## TLS ##

@@ -303,12 +401,12 @@ listeners:
#
#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"

# Whether to verify TLS certificates when sending federation traffic.
# Whether to verify TLS server certificates for outbound federation requests.
#
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
#
#federation_verify_certificates: true
#federation_verify_certificates: false

# Skip federation certificate verification on the following whitelist
# of domains.
@@ -753,13 +851,25 @@ uploads_path: "DATADIR/uploads"
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed.
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
#
#account_validity:
#  enabled: True
#  period: 6w
#  renew_at: 1w
#  renew_email_subject: "Renew your %(app)s account"
#  # Directory in which Synapse will try to find the HTML files to serve to the
#  # user when trying to renew an account. Optional, defaults to
#  # synapse/res/templates.
#  template_dir: "res/templates"
#  # HTML to be displayed to the user after they successfully renewed their
#  # account. Optional.
#  account_renewed_html_path: "account_renewed.html"
#  # HTML to be displayed when the user tries to renew an account with an invalid
#  # renewal token. Optional.
#  invalid_token_html_path: "invalid_token.html"
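The bracketed range above translates directly into code. As a hedged sketch
(illustrative only, not Synapse's exact implementation), a randomized
expiration timestamp could be computed like this:

```
import random
import time

def pick_expiration_ts(period_ms):
    """Pick an expiration date uniformly in [now + period - d ; now + period],
    where d is 10% of the validity period (all times in milliseconds). The
    jitter avoids every account expiring at the same instant after a restart."""
    now_ms = int(time.time() * 1000)
    d = period_ms // 10
    return now_ms + period_ms - random.randrange(0, d + 1)
```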

# The user must provide all of the below types of 3PID when registering.
#
@@ -772,9 +882,32 @@ uploads_path: "DATADIR/uploads"
#
#disable_msisdn_registration: true

# Derive the user's matrix ID from a type of 3PID used when registering.
# This overrides any matrix ID the user proposes when calling /register.
# The 3PID type should be present in registrations_require_3pid to avoid
# users failing to register if they don't specify the right kind of 3pid.
#
#register_mxid_from_3pid: email

# Uncomment to set the display name of new users to their email address,
# rather than using the default heuristic.
#
#register_just_use_email_for_display_name: true

# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# Use an Identity Server to establish which 3PIDs are allowed to register?
# Overrides allowed_local_3pids below.
#
#check_is_for_allowed_local_3pids: matrix.org
#
# If you are using an IS you can also check whether that IS registers
# pending invites for the given 3PID (and then allow it to sign up on
# the platform):
#
#allow_invited_3pids: False
#
#allowed_local_3pids:
#  - medium: email
#    pattern: '.*@matrix\.org'
@@ -783,6 +916,11 @@ uploads_path: "DATADIR/uploads"
#  - medium: msisdn
#    pattern: '\+44'

# If true, stop users from trying to change the 3PIDs associated with
# their accounts.
#
#disable_3pid_changes: False

# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -824,6 +962,30 @@ uploads_path: "DATADIR/uploads"
#  - matrix.org
#  - vector.im

# If enabled, user IDs, display names and avatar URLs will be replicated
# to this server whenever they change.
# This is an experimental API currently implemented by sydent to support
# cross-homeserver user directories.
#
#replicate_user_profiles_to: example.com

# If specified, attempt to replay registrations, profile changes & 3pid
# bindings on the given target homeserver via the AS API. The HS is authed
# via a given AS token.
#
#shadow_server:
#  hs_url: https://shadow.example.com
#  hs: shadow.example.com
#  as_token: 12u394refgbdhivsia

# If enabled, don't let users set their own display names/avatars
# other than for the very first time (unless they are a server admin).
# Useful when provisioning users based on the contents of a 3rd party
# directory and to avoid ambiguities.
#
#disable_set_displayname: False
#disable_set_avatar_url: False

# Users who register on this homeserver will automatically be joined
# to these rooms
#
@@ -924,12 +1086,43 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"

# The trusted servers to download signing keys from.
#
#perspectives:
#  servers:
#    "matrix.org":
#      verify_keys:
#        "ed25519:auto":
#          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
#    server_name: the name of the server. required.
#
#    verify_keys: an optional map from key id to base64-encoded public key.
#       If specified, we will check that the response is signed by at least
#       one of the given keys.
#
#    accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
#       and federation_verify_certificates is not `true`, synapse will refuse
#       to start, because this would allow anyone who can spoof DNS responses
#       to masquerade as the trusted key server. If you know what you are doing
#       and are sure that your network environment provides a secure connection
#       to the key server, you can set this to `true` to override this
#       behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
#  - server_name: "my_trusted_server.example.com"
#    verify_keys:
#      "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
#  - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
#  - server_name: "matrix.org"
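As a rough illustration of what configuring a `verify key` buys you: the notary
response can be checked with the `signedjson` library. This is a hedged sketch,
not Synapse's actual key-fetching code; the server name and key are the
placeholders from the example above.

```
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64

# Base64 public key as it would appear under `verify_keys` in the config.
pubkey_b64 = "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
verify_key = decode_verify_key_bytes("ed25519:auto", decode_base64(pubkey_b64))

def check_notary_response(response_json):
    """Ensure the key server's JSON response is signed by the trusted key."""
    try:
        verify_signed_json(
            response_json, "my_trusted_server.example.com", verify_key
        )
    except SignatureVerifyException:
        raise RuntimeError("response not signed by the configured verify key")
```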

# Enable SAML2 for registration and login. Uses pysaml2.

@@ -1004,12 +1197,40 @@ password_config:
#
#pepper: "EVEN_MORE_SECRET"

# Define and enforce a password policy. Each parameter is optional, boolean
# parameters default to 'false' and integer parameters default to 0.
# This is an early implementation of MSC2000.
#
#policy:
#   # Whether to enforce the password policy.
#   #
#   #enabled: true

#   # Minimum accepted length for a password.
#   #
#   #minimum_length: 15

#   # Whether a password must contain at least one digit.
#   #
#   #require_digit: true

#   # Whether a password must contain at least one symbol.
#   # A symbol is any character that's not a number or a letter.
#   #
#   #require_symbol: true

#   # Whether a password must contain at least one lowercase letter.
#   #
#   #require_lowercase: true

#   # Whether a password must contain at least one uppercase letter.
#   #
#   #require_uppercase: true
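The commented-out options above map one-to-one onto the M_PASSWORD_* error
codes added to the errors module later in this diff. A hedged sketch of an
equivalent validator (illustrative, not this branch's actual checker):

```
import re

class WeakPasswordError(Exception):
    """Stands in for the PasswordRefusedError added in this diff."""
    def __init__(self, errcode):
        super(WeakPasswordError, self).__init__(errcode)
        self.errcode = errcode

def check_password_policy(password, policy):
    """Validate `password` against a dict shaped like the 'policy' section
    above, raising on the first violated rule."""
    if not policy.get("enabled", False):
        return
    if len(password) < policy.get("minimum_length", 0):
        raise WeakPasswordError("M_PASSWORD_TOO_SHORT")
    if policy.get("require_digit") and not re.search(r"\d", password):
        raise WeakPasswordError("M_PASSWORD_NO_DIGIT")
    if policy.get("require_symbol") and not re.search(r"[^a-zA-Z0-9]", password):
        raise WeakPasswordError("M_PASSWORD_NO_SYMBOL")
    if policy.get("require_lowercase") and not re.search(r"[a-z]", password):
        raise WeakPasswordError("M_PASSWORD_NO_LOWERCASE")
    if policy.get("require_uppercase") and not re.search(r"[A-Z]", password):
        raise WeakPasswordError("M_PASSWORD_NO_UPPERCASE")
```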

# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.

# Enable sending emails for password resets, notification events or
# account expiry notices
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
@@ -1017,22 +1238,64 @@ password_config:
#email:
#   enable_notifs: false
#   smtp_host: "localhost"
#   smtp_port: 25
#   smtp_port: 25 # SSL: 465, STARTTLS: 587
#   smtp_user: "exampleusername"
#   smtp_pass: "examplepassword"
#   require_transport_security: False
#   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
#   app_name: Matrix
#   # if template_dir is unset, uses the example templates that are part of
#   # the Synapse distribution.
#
#   # Enable email notifications by default
#   notif_for_new_users: True
#
#   # Defining a custom URL for Riot is only needed if email notifications
#   # should contain links to a self-hosted installation of Riot; when set
#   # the "app_name" setting is ignored
#   riot_base_url: "http://localhost/riot"
#
#   # Enable sending password reset emails via the configured, trusted
#   # identity servers
#   #
#   # IMPORTANT! This will give a malicious or overtaken identity server
#   # the ability to reset passwords for your users! Make absolutely sure
#   # that you want to do this! It is strongly recommended that password
#   # reset emails be sent by the homeserver instead
#   #
#   # If this option is set to false and SMTP options have not been
#   # configured, resetting user passwords via email will be disabled
#   #trust_identity_server_for_password_resets: false
#
#   # Configure the time that a validation email or text message code
#   # will expire after sending
#   #
#   # This is currently used for password resets
#   #validation_token_lifetime: 1h
#
#   # Template directory. All template files should be stored within this
#   # directory
#   #
#   #template_dir: res/templates
#
#   # Templates for email notifications
#   #
#   notif_template_html: notif_mail.html
#   notif_template_text: notif_mail.txt
#   # Templates for account expiry notices.
#
#   # Templates for account expiry notices
#   #
#   expiry_template_html: notice_expiry.html
#   expiry_template_text: notice_expiry.txt
#   notif_for_new_users: True
#   riot_base_url: "http://localhost/riot"
#
#   # Templates for password reset emails sent by the homeserver
#   #
#   #password_reset_template_html: password_reset.html
#   #password_reset_template_text: password_reset.txt
#
#   # Templates for password reset success and failure pages that a user
#   # will see after attempting to reset their password
#   #
#   #password_reset_template_success_html: password_reset_success.html
#   #password_reset_template_failure_html: password_reset_failure.html
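The `smtp_port` comment above corresponds to the standard SMTP transport
modes, and `require_transport_security` amounts to insisting on STARTTLS
before authenticating. A hedged sketch with plain `smtplib` (not Synapse's
actual mailer):

```
import smtplib

def open_smtp(host, port, user=None, password=None,
              require_transport_security=False):
    """Open an SMTP connection the way the options above suggest:
    port 465 is implicit TLS; otherwise attempt STARTTLS (e.g. on 25/587)."""
    if port == 465:
        conn = smtplib.SMTP_SSL(host, port)
    else:
        conn = smtplib.SMTP(host, port)
        try:
            conn.starttls()
        except smtplib.SMTPNotSupportedError:
            if require_transport_security:
                raise RuntimeError("server does not support STARTTLS")
    if user is not None:
        conn.login(user, password)
    return conn
```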

#password_providers:
@@ -1093,13 +1356,18 @@ password_config:
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
#   UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
#user_directory:
#  enabled: true
#  search_all_users: false
#
#  # If this is set, user search will be delegated to this ID server instead
#  # of synapse performing the search itself.
#  # This is an experimental API.
#  defer_to_id_server: https://id.example.com

# User Consent configuration
@@ -1252,3 +1520,16 @@ password_config:
#    alias: "*"
#    room_id: "*"
#    action: allow

# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
#  module: "my_custom_project.SuperRulesSet"
#  config:
#    example_option: 'things'
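For orientation, a module matching that stanza could look like the following
hedged sketch. The hook name follows synapse/events/third_party_rules.py as
referenced above, but treat the constructor and method signatures as
assumptions; they are version-dependent.

```
class SuperRulesSet(object):
    def __init__(self, config, *args, **kwargs):
        # Synapse passes the `config` dict from the stanza above; any
        # further constructor arguments are assumed/version-dependent.
        self.example_option = config.get("example_option")

    def check_event_allowed(self, event, state_events):
        """Called for each incoming event; return False to reject it."""
        return True
```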
@@ -7,11 +7,7 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
quickest solution to fix it is:

```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```

and restart the synapse, which should then start a background task to
solution to fix it is to execute the SQL here
https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.
7 res/templates-dinsic/mail-Vector.css Normal file
@@ -0,0 +1,7 @@
.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}
156 res/templates-dinsic/mail.css Normal file
@@ -0,0 +1,156 @@
body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    font-color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}
45 res/templates-dinsic/notif.html Normal file
@@ -0,0 +1,45 @@
{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">Voir {{ room.title }}</a>
    </td>
    <td></td>
</tr>
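The templates above lean on the `mxc_to_http` filter to turn `mxc://` URIs
into thumbnail URLs the recipient's mail client can fetch. A hedged sketch of
roughly what that conversion produces (the base URL is a placeholder, and the
exact endpoint and query parameters are assumptions):

```
from urllib.parse import quote

def mxc_to_http(mxc_url, width, height,
                homeserver_base="https://matrix.example.com", method="crop"):
    """Convert mxc://<server>/<media_id> into a media thumbnail URL,
    approximating what the template filter emits."""
    assert mxc_url.startswith("mxc://")
    server_name, media_id = mxc_url[len("mxc://"):].split("/", 1)
    return (
        "%s/_matrix/media/r0/thumbnail/%s/%s?width=%d&height=%d&method=%s"
        % (homeserver_base, quote(server_name), quote(media_id),
           width, height, method)
    )
```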
16 res/templates-dinsic/notif.txt Normal file
@@ -0,0 +1,16 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

Voir {{ room.title }} à {{ notif.link }}
55 res/templates-dinsic/notif_mail.html Normal file
@@ -0,0 +1,55 @@
<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Bonjour {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Se désinscrire</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>
10 res/templates-dinsic/notif_mail.txt Normal file
@@ -0,0 +1,10 @@
Bonjour {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }}
33 res/templates-dinsic/room.html Normal file
@@ -0,0 +1,33 @@
<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Rejoindre la conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>
9 res/templates-dinsic/room.txt Normal file
@@ -0,0 +1,9 @@
{{ room.title }}

{% if room.invite %}
Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -20,9 +20,7 @@ class CallVisitor(ast.NodeVisitor):
        else:
            return

        if name == "client_path_patterns":
            PATTERNS_V1.append(node.args[0].s)
        elif name == "client_v2_patterns":
        if name == "client_patterns":
            PATTERNS_V2.append(node.args[0].s)

37 scripts/generate_signing_key.py Executable file
@@ -0,0 +1,37 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys

from signedjson.key import write_signing_keys, generate_signing_key

from synapse.util.stringutils import random_string

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-o", "--output_file",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help="Where to write the output to",
    )
    args = parser.parse_args()

    key_id = "a_" + random_string(4)
    # the trailing comma makes this a 1-tuple: write_signing_keys expects an
    # iterable of keys
    key = generate_signing_key(key_id),
    write_signing_keys(args.output_file, key)
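A quick usage sketch for the script above: the file it writes can be read back
with signedjson's `read_signing_keys`, which is also how Synapse loads
`signing_key_path`. The file name here is illustrative.

```
from signedjson.key import read_signing_keys

# The script writes one "ed25519 <key_id> <seed>" line per key.
with open("my.signing.key") as f:
    keys = read_signing_keys(f)

signing_key = keys[0]
print(signing_key.alg, signing_key.version)  # e.g. "ed25519 a_AbCd"
```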
@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.5.1"
__version__ = "1.0.0"

@@ -184,11 +184,22 @@ class Auth(object):
        return event_auth.get_public_keys(invite_event)

    @defer.inlineCallbacks
    def get_user_by_req(self, request, allow_guest=False, rights="access"):
    def get_user_by_req(
        self,
        request,
        allow_guest=False,
        rights="access",
        allow_expired=False,
    ):
        """ Get a registered user's ID.

        Args:
            request - An HTTP request with an access_token query parameter.
            allow_expired - Whether to allow the request through even if the account is
                expired. If true, Synapse will still require an access token to be
                provided but won't check if the account it belongs to has expired. This
                works thanks to /login delivering access tokens regardless of accounts'
                expiration.
        Returns:
            defer.Deferred: resolves to a ``synapse.types.Requester`` object
        Raises:
@@ -207,6 +218,7 @@ class Auth(object):
            )

            user_id, app_service = yield self._get_appservice_user_id(request)

            if user_id:
                request.authenticated_entity = user_id

@@ -229,7 +241,7 @@ class Auth(object):
            is_guest = user_info["is_guest"]

            # Deny the request if the user account has expired.
            if self._account_validity.enabled:
            if self._account_validity.enabled and not allow_expired:
                user_id = user.to_string()
                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
                if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
@@ -268,39 +280,40 @@ class Auth(object):
                errcode=Codes.MISSING_TOKEN
            )

    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
        )

        if app_service is None:
            defer.returnValue((None, None))
            return (None, None)

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(self.hs.get_ip_from_request(request))
            if ip_address not in app_service.ip_range_whitelist:
                defer.returnValue((None, None))
                return (None, None)

        if b"user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))
            return (app_service.sender, app_service)

        user_id = request.args[b"user_id"][0].decode('utf8')
        if app_service.sender == user_id:
            defer.returnValue((app_service.sender, app_service))
            return (app_service.sender, app_service)

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(
                403,
                "Application service cannot masquerade as this user."
            )
        if not (yield self.store.get_user_by_id(user_id)):
            raise AuthError(
                403,
                "Application service has not registered this user"
            )
        defer.returnValue((user_id, app_service))
        # Let ASes manipulate nonexistent users (e.g. to shadow-register them)
        # if not (yield self.store.get_user_by_id(user_id)):
        #     raise AuthError(
        #         403,
        #         "Application service has not registered this user"
        #     )
        return (user_id, app_service)

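A note on the `defer.returnValue` to `return` changes above: returning a value
from inside a generator is only legal on Python 3, which this branch now
assumes. A minimal sketch of the pattern under Twisted (the `store` call is a
hypothetical stand-in):

```
from twisted.internet import defer

@defer.inlineCallbacks
def lookup(store, token):
    # On Python 3, a bare `return value` inside an inlineCallbacks
    # generator behaves exactly like defer.returnValue(value).
    user = yield store.get_user_by_token(token)  # hypothetical store API
    if user is None:
        return (None, None)
    return (user["id"], user)
```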
    @defer.inlineCallbacks
    def get_user_by_access_token(self, token, rights="access"):
@@ -533,24 +546,15 @@ class Auth(object):
        defer.returnValue(user_info)

    def get_appservice_by_req(self, request):
        try:
            token = self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            service = self.store.get_app_service_by_token(token)
            if not service:
                logger.warn("Unrecognised appservice access token.")
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN
                )
            request.authenticated_entity = service.sender
            return defer.succeed(service)
        except KeyError:
            (user_id, app_service) = self._get_appservice_user_id(request)
            if not app_service:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN,
                )
            request.authenticated_entity = app_service.sender
            return app_service

    def is_server_admin(self, user):
        """ Check if the given user is a local server admin.

@@ -83,6 +83,7 @@ class EventTypes(object):
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"
    Encryption = "m.room.encryption"

    # These are used for validation
    Message = "m.room.message"
@@ -92,6 +93,8 @@ class EventTypes(object):
    ServerACL = "m.room.server_acl"
    Pinned = "m.room.pinned_events"

    Retention = "m.room.retention"


class RejectedReason(object):
    AUTH_ERROR = "auth_error"

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -61,6 +62,13 @@ class Codes(object):
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
    WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
    EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
    PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
    PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
    PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
    PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
    PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
    PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
    WEAK_PASSWORD = "M_WEAK_PASSWORD"


class CodeMessageException(RuntimeError):
@@ -339,6 +347,15 @@ class UnsupportedRoomVersionError(SynapseError):
        )


class ThreepidValidationError(SynapseError):
    """An error raised when there was a problem validating a threepid."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.FORBIDDEN
        super(ThreepidValidationError, self).__init__(*args, **kwargs)


class IncompatibleRoomVersionError(SynapseError):
    """A server is trying to join a room whose version it does not support.

@@ -363,6 +380,22 @@ class IncompatibleRoomVersionError(SynapseError):
        )


class PasswordRefusedError(SynapseError):
    """A password has been refused, either during password reset/change or registration.
    """

    def __init__(
        self,
        msg="This password doesn't comply with the server's policy",
        errcode=Codes.WEAK_PASSWORD,
    ):
        super(PasswordRefusedError, self).__init__(
            code=400,
            msg=msg,
            errcode=errcode,
        )


class RequestSendFailed(RuntimeError):
    """Sending a HTTP request over federation failed due to not being able to
    talk to the remote server for some reason.

@@ -50,6 +50,7 @@ class RoomVersion(object):
    disposition = attr.ib()  # str; one of the RoomDispositions
    event_format = attr.ib()  # int; one of the EventFormatVersions
    state_res = attr.ib()  # int; one of the StateResolutionVersions
    enforce_key_validity = attr.ib()  # bool


class RoomVersions(object):
@@ -58,35 +59,36 @@ class RoomVersions(object):
        RoomDisposition.STABLE,
        EventFormatVersions.V1,
        StateResolutionVersions.V1,
    )
    STATE_V2_TEST = RoomVersion(
        "state-v2-test",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V1,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
    )
    V2 = RoomVersion(
        "2",
        RoomDisposition.STABLE,
        EventFormatVersions.V1,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
    )
    V3 = RoomVersion(
        "3",
        RoomDisposition.STABLE,
        EventFormatVersions.V2,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
    )
    V4 = RoomVersion(
        "4",
        RoomDisposition.STABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
    )
    V5 = RoomVersion(
        "5",
        RoomDisposition.STABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
    )


# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1


KNOWN_ROOM_VERSIONS = {
@@ -94,7 +96,7 @@ KNOWN_ROOM_VERSIONS = {
        RoomVersions.V1,
        RoomVersions.V2,
        RoomVersions.V3,
        RoomVersions.STATE_V2_TEST,
        RoomVersions.V4,
        RoomVersions.V5,
    )
}  # type: dict[str, RoomVersion]

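Tying this back to the `default_room_version` option earlier in the diff: the
configured string is resolved against `KNOWN_ROOM_VERSIONS`, which is keyed by
the version identifier ("1" through "5", plus "state-v2-test"). A hedged
sketch of that lookup, not the branch's literal code:

```
def resolve_default_room_version(config_value, known_room_versions):
    """Map a configured default_room_version string to a RoomVersion."""
    if config_value not in known_room_versions:
        raise ValueError(
            "Unknown default_room_version: %r; known versions: %s"
            % (config_value, ", ".join(sorted(known_room_versions)))
        )
    return known_room_versions[config_value]
```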
@@ -26,6 +26,7 @@ CLIENT_API_PREFIX = "/_matrix/client"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"

@@ -344,15 +344,21 @@ class _LimitedHostnameResolver(object):

    def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
                        addressTypes=None, transportSemantics='TCP'):
        # Note this is happening deep within the reactor, so we don't need to
        # worry about log contexts.

        # We need this function to return `resolutionReceiver` so we do all the
        # actual logic involving deferreds in a separate function.
        self._resolve(
            resolutionReceiver, hostName, portNumber,
            addressTypes, transportSemantics,
        )

        # even though this is happening within the depths of twisted, we need to drop
        # our logcontext before starting _resolve, otherwise: (a) _resolve will drop
        # the logcontext if it returns an incomplete deferred; (b) _resolve will
        # call the resolutionReceiver *with* a logcontext, which it won't be expecting.
        with PreserveLoggingContext():
            self._resolve(
                resolutionReceiver,
                hostName,
                portNumber,
                addressTypes,
                transportSemantics,
            )

        return resolutionReceiver

@@ -37,8 +37,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -49,11 +48,11 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.frontend_proxy")


class PresenceStatusStubServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
class PresenceStatusStubServlet(RestServlet):
    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super(PresenceStatusStubServlet, self).__init__(hs)
        super(PresenceStatusStubServlet, self).__init__()
        self.http_client = hs.get_simple_http_client()
        self.auth = hs.get_auth()
        self.main_uri = hs.config.worker_main_http_uri
@@ -84,7 +83,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """

@@ -265,7 +265,7 @@ class ApplicationService(object):
    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def get_exlusive_user_regexes(self):
    def get_exclusive_user_regexes(self):
        """Get the list of regexes used to determine if a user is exclusively
        registered by the AS
        """

@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,12 +31,76 @@ logger = logging.getLogger(__name__)

class EmailConfig(Config):
    def read_config(self, config):
        # TODO: We should separate better the email configuration from the notification
        # and account validity config.

        self.email_enable_notifs = False

        email_config = config.get("email", {})
        self.email_enable_notifs = email_config.get("enable_notifs", False)

        if self.email_enable_notifs:
        self.email_smtp_host = email_config.get("smtp_host", None)
        self.email_smtp_port = email_config.get("smtp_port", None)
        self.email_smtp_user = email_config.get("smtp_user", None)
        self.email_smtp_pass = email_config.get("smtp_pass", None)
        self.require_transport_security = email_config.get(
            "require_transport_security", False
        )
        if "app_name" in email_config:
            self.email_app_name = email_config["app_name"]
        else:
            self.email_app_name = "Matrix"

        # TODO: Rename notif_from to something more generic, or have a separate
        # from for password resets, message notifications, etc?
        # Currently the email section is a bit bogged down with settings for
        # multiple functions. Would be good to split it out into separate
        # sections and only put the common ones under email:
        self.email_notif_from = email_config.get("notif_from", None)
        if self.email_notif_from is not None:
            # make sure it's valid
            parsed = email.utils.parseaddr(self.email_notif_from)
            if parsed[1] == '':
                raise RuntimeError("Invalid notif_from address")

        template_dir = email_config.get("template_dir")
        # we need an absolute path, because we change directory after starting (and
        # we don't yet know what auxiliary templates like mail.css we will need).
        # (Note that loading as package_resources with jinja.PackageLoader doesn't
        # work for the same reason.)
        if not template_dir:
            template_dir = pkg_resources.resource_filename(
                'synapse', 'res/templates'
            )

        self.email_template_dir = os.path.abspath(template_dir)

        self.email_enable_notifs = email_config.get("enable_notifs", False)
        account_validity_renewal_enabled = config.get(
            "account_validity", {},
        ).get("renew_at")

        email_trust_identity_server_for_password_resets = email_config.get(
            "trust_identity_server_for_password_resets", False,
        )
        self.email_password_reset_behaviour = (
            "remote" if email_trust_identity_server_for_password_resets else "local"
        )
        if self.email_password_reset_behaviour == "local" and email_config == {}:
            logger.warn(
                "User password resets have been disabled due to lack of email config"
            )
            self.email_password_reset_behaviour = "off"

        # Get lifetime of a validation token in milliseconds
        self.email_validation_token_lifetime = self.parse_duration(
            email_config.get("validation_token_lifetime", "1h")
        )

        if (
            self.email_enable_notifs
            or account_validity_renewal_enabled
            or self.email_password_reset_behaviour == "local"
        ):
            # make sure we can import the required deps
            import jinja2
            import bleach
@@ -42,6 +108,68 @@ class EmailConfig(Config):
            jinja2
            bleach

        if self.email_password_reset_behaviour == "local":
            required = [
                "smtp_host",
                "smtp_port",
                "notif_from",
            ]

            missing = []
            for k in required:
                if k not in email_config:
                    missing.append(k)

            if (len(missing) > 0):
                raise RuntimeError(
                    "email.password_reset_behaviour is set to 'local' "
                    "but required keys are missing: %s" %
                    (", ".join(["email." + k for k in missing]),)
                )

            # Templates for password reset emails
            self.email_password_reset_template_html = email_config.get(
                "password_reset_template_html", "password_reset.html",
            )
            self.email_password_reset_template_text = email_config.get(
                "password_reset_template_text", "password_reset.txt",
            )
            self.email_password_reset_failure_template = email_config.get(
                "password_reset_failure_template", "password_reset_failure.html",
            )
            # This template does not support any replaceable variables, so we will
            # read it from the disk once during setup
            email_password_reset_success_template = email_config.get(
                "password_reset_success_template", "password_reset_success.html",
            )

            # Check templates exist
            for f in [self.email_password_reset_template_html,
                      self.email_password_reset_template_text,
                      self.email_password_reset_failure_template,
                      email_password_reset_success_template]:
                p = os.path.join(self.email_template_dir, f)
                if not os.path.isfile(p):
                    raise ConfigError("Unable to find template file %s" % (p, ))

            # Retrieve content of web templates
            filepath = os.path.join(
                self.email_template_dir,
                email_password_reset_success_template,
            )
            self.email_password_reset_success_html_content = self.read_file(
                filepath,
                "email.password_reset_template_success_html",
            )

            if config.get("public_baseurl") is None:
                raise RuntimeError(
                    "email.password_reset_behaviour is set to 'local' but no "
                    "public_baseurl is set. This is necessary to generate password "
                    "reset links"
                )

        if self.email_enable_notifs:
            required = [
                "smtp_host",
                "smtp_port",
@@ -66,34 +194,13 @@ class EmailConfig(Config):
                    "email.enable_notifs is True but no public_baseurl is set"
                )

            self.email_smtp_host = email_config["smtp_host"]
            self.email_smtp_port = email_config["smtp_port"]
            self.email_notif_from = email_config["notif_from"]
            self.email_notif_template_html = email_config["notif_template_html"]
            self.email_notif_template_text = email_config["notif_template_text"]
            self.email_expiry_template_html = email_config.get(
                "expiry_template_html", "notice_expiry.html",
            )
            self.email_expiry_template_text = email_config.get(
                "expiry_template_text", "notice_expiry.txt",
            )

            template_dir = email_config.get("template_dir")
            # we need an absolute path, because we change directory after starting (and
            # we don't yet know what auxiliary templates like mail.css we will need).
            # (Note that loading as package_resources with jinja.PackageLoader doesn't
            # work for the same reason.)
            if not template_dir:
                template_dir = pkg_resources.resource_filename(
                    'synapse', 'res/templates'
                )
            template_dir = os.path.abspath(template_dir)

            for f in self.email_notif_template_text, self.email_notif_template_html:
                p = os.path.join(template_dir, f)
                p = os.path.join(self.email_template_dir, f)
                if not os.path.isfile(p):
                    raise ConfigError("Unable to find email template file %s" % (p, ))
            self.email_template_dir = template_dir

            self.email_notif_for_new_users = email_config.get(
                "notif_for_new_users", True
@@ -101,35 +208,24 @@ class EmailConfig(Config):
            self.email_riot_base_url = email_config.get(
                "riot_base_url", None
            )
            self.email_smtp_user = email_config.get(
                "smtp_user", None
            )
            self.email_smtp_pass = email_config.get(
                "smtp_pass", None
            )
            self.require_transport_security = email_config.get(
                "require_transport_security", False
            )
            if "app_name" in email_config:
                self.email_app_name = email_config["app_name"]
            else:
                self.email_app_name = "Matrix"

            # make sure it's valid
            parsed = email.utils.parseaddr(self.email_notif_from)
            if parsed[1] == '':
                raise RuntimeError("Invalid notif_from address")
        else:
            self.email_enable_notifs = False
            # Not much point setting defaults for the rest: it would be an
            # error for them to be used.
        if account_validity_renewal_enabled:
            self.email_expiry_template_html = email_config.get(
                "expiry_template_html", "notice_expiry.html",
            )
            self.email_expiry_template_text = email_config.get(
                "expiry_template_text", "notice_expiry.txt",
            )

            for f in self.email_expiry_template_text, self.email_expiry_template_html:
                p = os.path.join(self.email_template_dir, f)
                if not os.path.isfile(p):
                    raise ConfigError("Unable to find email template file %s" % (p, ))

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable sending emails for notification events or expiry notices
        # Defining a custom URL for Riot is only needed if email notifications
        # should contain links to a self-hosted installation of Riot; when set
        # the "app_name" setting is ignored.
        # Enable sending emails for password resets, notification events or
        # account expiry notices
        #
        # If your SMTP server requires authentication, the optional smtp_user &
        # smtp_pass variables should be used
@@ -137,20 +233,62 @@ class EmailConfig(Config):
        #email:
        #   enable_notifs: false
        #   smtp_host: "localhost"
        #   smtp_port: 25
        #   smtp_port: 25 # SSL: 465, STARTTLS: 587
        #   smtp_user: "exampleusername"
        #   smtp_pass: "examplepassword"
        #   require_transport_security: False
        #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
        #   app_name: Matrix
        #   # if template_dir is unset, uses the example templates that are part of
        #   # the Synapse distribution.
        #
        #   # Enable email notifications by default
        #   notif_for_new_users: True
        #
        #   # Defining a custom URL for Riot is only needed if email notifications
        #   # should contain links to a self-hosted installation of Riot; when set
        #   # the "app_name" setting is ignored
        #   riot_base_url: "http://localhost/riot"
        #
        #   # Enable sending password reset emails via the configured, trusted
        #   # identity servers
        #   #
        #   # IMPORTANT! This will give a malicious or overtaken identity server
        #   # the ability to reset passwords for your users! Make absolutely sure
        #   # that you want to do this! It is strongly recommended that password
        #   # reset emails be sent by the homeserver instead
        #   #
        #   # If this option is set to false and SMTP options have not been
        #   # configured, resetting user passwords via email will be disabled
        #   #trust_identity_server_for_password_resets: false
        #
        #   # Configure the time that a validation email or text message code
        #   # will expire after sending
        #   #
        #   # This is currently used for password resets
        #   #validation_token_lifetime: 1h
        #
        #   # Template directory. All template files should be stored within this
        #   # directory
        #   #
        #   #template_dir: res/templates
        #
        #   # Templates for email notifications
        #   #
        #   notif_template_html: notif_mail.html
        #   notif_template_text: notif_mail.txt
        #   # Templates for account expiry notices.
        #
        #   # Templates for account expiry notices
        #   #
        #   expiry_template_html: notice_expiry.html
        #   expiry_template_text: notice_expiry.txt
        #   notif_for_new_users: True
        #   riot_base_url: "http://localhost/riot"
        #
        #   # Templates for password reset emails sent by the homeserver
        #   #
        #   #password_reset_template_html: password_reset.html
        #   #password_reset_template_text: password_reset.txt
        #
        #   # Templates for password reset success and failure pages that a user
        #   # will see after attempting to reset their password
        #   #
        #   #password_reset_template_success_html: password_reset_success.html
        #   #password_reset_template_failure_html: password_reset_failure.html
        """
@@ -38,6 +38,7 @@ from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
@@ -73,5 +74,6 @@ class HomeServerConfig(
    StatsConfig,
    ServerNoticesConfig,
    RoomDirectoryConfig,
    ThirdPartyRulesConfig,
):
    pass

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,6 +18,8 @@ import hashlib
import logging
import os

import attr
import jsonschema
from signedjson.key import (
NACL_ED25519,
decode_signing_key_base64,
@@ -32,11 +35,36 @@ from synapse.util.stringutils import random_string, random_string_with_symbols

from ._base import Config, ConfigError

INSECURE_NOTARY_ERROR = """\
Your server is configured to accept key server responses without signature
validation or TLS certificate validation. This is likely to be very insecure. If
you are *sure* you want to do this, set 'accept_keys_insecurely' on the
keyserver configuration."""

RELYING_ON_MATRIX_KEY_ERROR = """\
Your server is configured to accept key server responses without TLS certificate
validation, and which are only signed by the old (possibly compromised)
matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do,
and you should enable 'federation_verify_certificates' in your configuration.

If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
trusted_key_server configuration."""


logger = logging.getLogger(__name__)


class KeyConfig(Config):
@attr.s
class TrustedKeyServer(object):
# string: name of the server.
server_name = attr.ib()

# dict[str,VerifyKey]|None: map from key id to key object, or None to disable
# signature verification.
verify_keys = attr.ib(default=None)


class KeyConfig(Config):
def read_config(self, config):
# the signing key can be specified inline or in a separate file
if "signing_key" in config:
@@ -49,16 +77,27 @@ class KeyConfig(Config):
config.get("old_signing_keys", {})
)
self.key_refresh_interval = self.parse_duration(
config.get("key_refresh_interval", "1d"),
config.get("key_refresh_interval", "1d")
)
self.perspectives = self.read_perspectives(
config.get("perspectives", {}).get("servers", {
"matrix.org": {"verify_keys": {
"ed25519:auto": {
"key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
}
}}
})

# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
key_servers = [{"server_name": "matrix.org"}]
else:
key_servers = config.get("trusted_key_servers", [])

if not isinstance(key_servers, list):
raise ConfigError(
"trusted_key_servers, if given, must be a list, not a %s"
% (type(key_servers).__name__,)
)

# merge the 'perspectives' config into the 'trusted_key_servers' config.
key_servers.extend(_perspectives_to_key_servers(config))

# list of TrustedKeyServer objects
self.key_servers = list(
_parse_key_servers(key_servers, self.federation_verify_certificates)
)

self.macaroon_secret_key = config.get(
@@ -78,8 +117,9 @@ class KeyConfig(Config):
# falsification of values
self.form_secret = config.get("form_secret", None)

def default_config(self, config_dir_path, server_name, generate_secrets=False,
**kwargs):
def default_config(
self, config_dir_path, server_name, generate_secrets=False, **kwargs
):
base_key_name = os.path.join(config_dir_path, server_name)

if generate_secrets:
@@ -91,7 +131,8 @@ class KeyConfig(Config):
macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
form_secret = "# form_secret: <PRIVATE STRING>"

return """\
return (
"""\
# a secret which is used to sign access tokens. If none is specified,
# the registration_shared_secret is used, if one is given; otherwise,
# a secret key is derived from the signing key.
@@ -133,33 +174,53 @@ class KeyConfig(Config):

# The trusted servers to download signing keys from.
#
#perspectives:
# servers:
# "matrix.org":
# verify_keys:
# "ed25519:auto":
# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
""" % locals()

def read_perspectives(self, perspectives_servers):
servers = {}
for server_name, server_config in perspectives_servers.items():
for key_id, key_data in server_config["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
servers.setdefault(server_name, {})[key_id] = verify_key
return servers
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
#
# verify_keys: an optional map from key id to base64-encoded public key.
# If specified, we will check that the response is signed by at least
# one of the given keys.
#
# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
# and federation_verify_certificates is not `true`, synapse will refuse
# to start, because this would allow anyone who can spoof DNS responses
# to masquerade as the trusted key server. If you know what you are doing
# and are sure that your network environment provides a secure connection
# to the key server, you can set this to `true` to override this
# behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
# - server_name: "my_trusted_server.example.com"
# verify_keys:
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
# - server_name: "matrix.org"
"""
% locals()
)

def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
raise ConfigError(
"Error reading signing_key: %s" % (str(e))
)
raise ConfigError("Error reading signing_key: %s" % (str(e)))

def read_old_signing_keys(self, old_signing_keys):
keys = {}
@@ -182,9 +243,7 @@ class KeyConfig(Config):
if not self.path_exists(signing_key_path):
with open(signing_key_path, "w") as signing_key_file:
key_id = "a_" + random_string(4)
write_signing_keys(
signing_key_file, (generate_signing_key(key_id),),
)
write_signing_keys(signing_key_file, (generate_signing_key(key_id),))
else:
signing_keys = self.read_file(signing_key_path, "signing_key")
if len(signing_keys.split("\n")[0].split()) == 1:
@@ -194,6 +253,116 @@ class KeyConfig(Config):
NACL_ED25519, key_id, signing_keys.split("\n")[0]
)
with open(signing_key_path, "w") as signing_key_file:
write_signing_keys(
signing_key_file, (key,),
write_signing_keys(signing_key_file, (key,))


def _perspectives_to_key_servers(config):
"""Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'

Returns an iterable of entries to add to trusted_key_servers.
"""

# 'perspectives' looks like:
#
# {
# "servers": {
# "matrix.org": {
# "verify_keys": {
# "ed25519:auto": {
# "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# }
# }
# }
# }
# }
#
# 'trusted_keys' looks like:
#
# [
# {
# "server_name": "matrix.org",
# "verify_keys": {
# "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
# }
# }
# ]

perspectives_servers = config.get("perspectives", {}).get("servers", {})

for server_name, server_opts in perspectives_servers.items():
trusted_key_server_entry = {"server_name": server_name}
verify_keys = server_opts.get("verify_keys")
if verify_keys is not None:
trusted_key_server_entry["verify_keys"] = {
key_id: key_data["key"] for key_id, key_data in verify_keys.items()
}
yield trusted_key_server_entry


TRUSTED_KEY_SERVERS_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "schema for the trusted_key_servers setting",
"type": "array",
"items": {
"type": "object",
"properties": {
"server_name": {"type": "string"},
"verify_keys": {
"type": "object",
# each key must be a base64 string
"additionalProperties": {"type": "string"},
},
},
"required": ["server_name"],
},
}


def _parse_key_servers(key_servers, federation_verify_certificates):
try:
jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
except jsonschema.ValidationError as e:
raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)

for server in key_servers:
server_name = server["server_name"]
result = TrustedKeyServer(server_name=server_name)

verify_keys = server.get("verify_keys")
if verify_keys is not None:
result.verify_keys = {}
for key_id, key_base64 in verify_keys.items():
if not is_signing_algorithm_supported(key_id):
raise ConfigError(
"Unsupported signing algorithm on key %s for server %s in "
"trusted_key_servers" % (key_id, server_name)
)
try:
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
except Exception as e:
raise ConfigError(
"Unable to parse key %s for server %s in "
"trusted_key_servers: %s" % (key_id, server_name, e)
)

result.verify_keys[key_id] = verify_key

if (
not federation_verify_certificates and
not server.get("accept_keys_insecurely")
):
_assert_keyserver_has_verify_keys(result)

yield result


def _assert_keyserver_has_verify_keys(trusted_key_server):
if not trusted_key_server.verify_keys:
raise ConfigError(INSECURE_NOTARY_ERROR)

# also check that they are not blindly checking the old matrix.org key
if trusted_key_server.server_name == "matrix.org" and any(
key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys
):
raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR)
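For a quick sanity check outside of Synapse, the draft-04 schema shown above can be exercised directly with the `jsonschema` package. A minimal sketch, with the sample lists modelled on the commented examples (the variable names here are assumptions):

    import jsonschema

    # Copied from TRUSTED_KEY_SERVERS_SCHEMA above (description key omitted).
    SCHEMA = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "server_name": {"type": "string"},
                "verify_keys": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
            "required": ["server_name"],
        },
    }

    good = [{"server_name": "matrix.org"}]
    bad = [{"verify_keys": {}}]  # missing the required server_name

    jsonschema.validate(good, SCHEMA)  # passes silently
    try:
        jsonschema.validate(bad, SCHEMA)
    except jsonschema.ValidationError as e:
        # _parse_key_servers surfaces this same e.message via ConfigError.
        print("rejected:", e.message)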
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +30,10 @@ class PasswordConfig(Config):
self.password_enabled = password_config.get("enabled", True)
self.password_pepper = password_config.get("pepper", "")

# Password policy
self.password_policy = password_config.get("policy", {})
self.password_policy_enabled = self.password_policy.pop("enabled", False)

def default_config(self, config_dir_path, server_name, **kwargs):
return """\
password_config:
@@ -39,4 +45,34 @@ class PasswordConfig(Config):
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#
#pepper: "EVEN_MORE_SECRET"

# Define and enforce a password policy. Each parameter is optional, boolean
# parameters default to 'false' and integer parameters default to 0.
# This is an early implementation of MSC2000.
#
#policy:
# Whether to enforce the password policy.
#
#enabled: true

# Minimum accepted length for a password.
#
#minimum_length: 15

# Whether a password must contain at least one digit.
#
#require_digit: true

# Whether a password must contain at least one symbol.
# A symbol is any character that's not a number or a letter.
#
#require_symbol: true

# Whether a password must contain at least one lowercase letter.
#
#require_lowercase: true

# Whether a password must contain at least one uppercase letter.
#
#require_uppercase: true
"""
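As an illustration of how these policy flags combine, a standalone checker might look like the sketch below. The function name and list-of-failures return style are assumptions for illustration, not Synapse's implementation:

    import re

    def check_password_against_policy(password, policy):
        """Return the list of violated rules, mirroring the options above."""
        failures = []
        if len(password) < policy.get("minimum_length", 0):
            failures.append("minimum_length")
        if policy.get("require_digit") and not re.search(r"\d", password):
            failures.append("require_digit")
        if policy.get("require_symbol") and not re.search(r"[^A-Za-z0-9]", password):
            failures.append("require_symbol")
        if policy.get("require_lowercase") and not re.search(r"[a-z]", password):
            failures.append("require_lowercase")
        if policy.get("require_uppercase") and not re.search(r"[A-Z]", password):
            failures.append("require_uppercase")
        return failures

    policy = {"minimum_length": 15, "require_digit": True, "require_symbol": True}
    print(check_password_against_policy("hunter2", policy))
    # ['minimum_length', 'require_symbol']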
@@ -13,8 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from distutils.util import strtobool

import pkg_resources

from synapse.config._base import Config, ConfigError
from synapse.types import RoomAlias
from synapse.util.stringutils import random_string_with_symbols
@@ -39,8 +42,38 @@ class AccountValidityConfig(Config):
else:
self.renew_email_subject = "Renew your %(app)s account"

if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
self.startup_job_max_delta = self.period * 10. / 100.

if self.renew_by_email_enabled:
if "public_baseurl" not in synapse_config:
raise ConfigError("Can't send renewal emails without 'public_baseurl'")

template_dir = config.get("template_dir")

if not template_dir:
template_dir = pkg_resources.resource_filename("synapse", "res/templates")

if "account_renewed_html_path" in config:
file_path = os.path.join(template_dir, config["account_renewed_html_path"])

self.account_renewed_html_content = self.read_file(
file_path, "account_validity.account_renewed_html_path"
)
else:
self.account_renewed_html_content = (
"<html><body>Your account has been successfully renewed.</body></html>"
)

if "invalid_token_html_path" in config:
file_path = os.path.join(template_dir, config["invalid_token_html_path"])

self.invalid_token_html_content = self.read_file(
file_path, "account_validity.invalid_token_html_path"
)
else:
self.invalid_token_html_content = (
"<html><body>Invalid renewal token.</body></html>"
)


class RegistrationConfig(Config):
@@ -60,8 +93,19 @@ class RegistrationConfig(Config):

self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
self.check_is_for_allowed_local_3pids = config.get(
"check_is_for_allowed_local_3pids", None
)
self.allow_invited_3pids = config.get("allow_invited_3pids", False)

self.disable_3pid_changes = config.get("disable_3pid_changes", False)

self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
self.registration_shared_secret = config.get("registration_shared_secret")
self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")
self.register_just_use_email_for_display_name = config.get(
"register_just_use_email_for_display_name", False,
)

self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config.get(
@@ -81,6 +125,16 @@ class RegistrationConfig(Config):
raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)

self.disable_set_displayname = config.get("disable_set_displayname", False)
self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)

self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
if not isinstance(self.replicate_user_profiles_to, list):
self.replicate_user_profiles_to = [self.replicate_user_profiles_to, ]

self.shadow_server = config.get("shadow_server", None)
self.rewrite_identity_server_urls = config.get("rewrite_identity_server_urls", {})

self.disable_msisdn_registration = (
config.get("disable_msisdn_registration", False)
)
@@ -129,13 +183,25 @@ class RegistrationConfig(Config):
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed.
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10%% of the validity period.
#
#account_validity:
# enabled: True
# period: 6w
# renew_at: 1w
# renew_email_subject: "Renew your %%(app)s account"
# # Directory in which Synapse will try to find the HTML files to serve to the
# # user when trying to renew an account. Optional, defaults to
# # synapse/res/templates.
# template_dir: "res/templates"
# # HTML to be displayed to the user after they successfully renewed their
# # account. Optional.
# account_renewed_html_path: "account_renewed.html"
# # HTML to be displayed when the user tries to renew an account with an invalid
# # renewal token. Optional.
# invalid_token_html_path: "invalid_token.html"

# The user must provide all of the below types of 3PID when registering.
#
@@ -148,9 +214,32 @@ class RegistrationConfig(Config):
#
#disable_msisdn_registration: true

# Derive the user's matrix ID from a type of 3PID used when registering.
# This overrides any matrix ID the user proposes when calling /register.
# The 3PID type should be present in registrations_require_3pid to avoid
# users failing to register if they don't specify the right kind of 3pid.
#
#register_mxid_from_3pid: email

# Uncomment to set the display name of new users to their email address,
# rather than using the default heuristic.
#
#register_just_use_email_for_display_name: true

# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# Use an Identity Server to establish which 3PIDs are allowed to register?
# Overrides allowed_local_3pids below.
#
#check_is_for_allowed_local_3pids: matrix.org
#
# If you are using an IS you can also check whether that IS registers
# pending invites for the given 3PID (and then allow it to sign up on
# the platform):
#
#allow_invited_3pids: False
#
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\\.org'
@@ -159,6 +248,11 @@ class RegistrationConfig(Config):
# - medium: msisdn
# pattern: '\\+44'

# If true, stop users from trying to change the 3PIDs associated with
# their accounts.
#
#disable_3pid_changes: False

# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -200,6 +294,30 @@ class RegistrationConfig(Config):
# - matrix.org
# - vector.im

# If enabled, user IDs, display names and avatar URLs will be replicated
# to this server whenever they change.
# This is an experimental API currently implemented by sydent to support
# cross-homeserver user directories.
#
#replicate_user_profiles_to: example.com

# If specified, attempt to replay registrations, profile changes & 3pid
# bindings on the given target homeserver via the AS API. The HS is authed
# via a given AS token.
#
#shadow_server:
# hs_url: https://shadow.example.com
# hs: shadow.example.com
# as_token: 12u394refgbdhivsia

# If enabled, don't let users set their own display names/avatars
# other than for the very first time (unless they are a server admin).
# Useful when provisioning users based on the contents of a 3rd party
# directory and to avoid ambiguities.
#
#disable_set_displayname: False
#disable_set_avatar_url: False

# Users who register on this homeserver will automatically be joined
# to these rooms
#
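The randomised expiration described above — a date in [now + period - d ; now + period] with d equal to 10% of the validity period, matching the `startup_job_max_delta` computed in the code — can be sketched as follows. The function name and millisecond units are assumptions for illustration:

    import random
    import time

    def pick_expiration_ts(period_ms):
        """Pick an expiration timestamp in [now + period - d, now + period],
        where d is 10% of the validity period (cf. startup_job_max_delta)."""
        now_ms = int(time.time() * 1000)
        max_delta = period_ms * 10.0 / 100.0
        delta = random.uniform(0, max_delta)
        return int(now_ms + period_ms - delta)

    six_weeks_ms = 6 * 7 * 24 * 60 * 60 * 1000  # the sample config's "6w"
    print(pick_expiration_ts(six_weeks_ms))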
@@ -20,6 +20,7 @@ import os.path

from netaddr import IPSet

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.python_dependencies import DependencyException, check_requirements

@@ -35,6 +36,8 @@ logger = logging.Logger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']

DEFAULT_ROOM_VERSION = "4"


class ServerConfig(Config):

@@ -81,13 +84,49 @@ class ServerConfig(Config):
"require_auth_for_profile_requests", False,
)

# If set to 'True', requires authentication to access the server's
# public rooms directory through the client API, and forbids any other
# homeserver to fetch it via federation.
self.restrict_public_rooms_to_local_users = config.get(
"restrict_public_rooms_to_local_users", False,
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
):
raise ConfigError(
"Can't use 'restrict_public_rooms_to_local_users' if"
" 'allow_public_rooms_without_auth' and/or"
" 'allow_public_rooms_over_federation' is set."
)

# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
# flag is now obsolete but we need to check it for backward-compatibility.
if config.get("restrict_public_rooms_to_local_users", False):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'False', requires authentication to access the server's public
# rooms directory through the client API. Defaults to 'True'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", True
)
# If set to 'False', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'True'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", True
)

default_room_version = config.get(
"default_room_version", DEFAULT_ROOM_VERSION,
)

# Ensure room version is a str
default_room_version = str(default_room_version)

if default_room_version not in KNOWN_ROOM_VERSIONS:
raise ConfigError(
"Unknown default_room_version: %s, known room versions: %s" %
(default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
)

# Get the actual room version object rather than just the identifier
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]

# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
@@ -183,6 +222,121 @@ class ServerConfig(Config):
# events with profile information that differs from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

# Whether to show the users on this homeserver in the user directory. Defaults to
# True.
self.show_users_in_user_directory = config.get(
"show_users_in_user_directory", True,
)

retention_config = config.get("retention")
if retention_config is None:
retention_config = {}

self.retention_enabled = retention_config.get("enabled", False)

retention_default_policy = retention_config.get("default_policy")

if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)

self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)

if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None

self.retention_allowed_lifetime_min = retention_config.get("allowed_lifetime_min")
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)

self.retention_allowed_lifetime_max = retention_config.get("allowed_lifetime_max")
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)

if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min > self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)

self.retention_purge_jobs = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")

if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)

interval = self.parse_duration(interval_config)

shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")

if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)

longest_max_lifetime = purge_job_config.get("longest_max_lifetime")

if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)

if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)

self.retention_purge_jobs.append({
"interval": interval,
"shortest_max_lifetime": shortest_max_lifetime,
"longest_max_lifetime": longest_max_lifetime,
})

if not self.retention_purge_jobs:
self.retention_purge_jobs = [{
"interval": self.parse_duration("1d"),
"shortest_max_lifetime": None,
"longest_max_lifetime": None,
}]

self.listeners = []
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
@@ -310,6 +464,10 @@ class ServerConfig(Config):
unsecure_port = 8008

pid_file = os.path.join(data_dir_path, "homeserver.pid")

# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
return """\
## Server ##

@@ -378,11 +536,25 @@ class ServerConfig(Config):
#
#require_auth_for_profile_requests: true

# If set to 'true', requires authentication to access the server's
# public rooms directory through the client API, and forbids any other
# homeserver to fetch it via federation. Defaults to 'false'.
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
#
#restrict_public_rooms_to_local_users: true
#allow_public_rooms_without_auth: false

# If set to 'false', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'true'.
#
#allow_public_rooms_over_federation: false

# The default room version for newly created rooms.
#
# Known room versions are listed here:
# https://matrix.org/docs/spec/#complete-list-of-room-versions
#
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "%(default_room_version)s"

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -552,6 +724,22 @@ class ServerConfig(Config):

# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2
@@ -582,6 +770,74 @@ class ServerConfig(Config):
# Defaults to 'true'.
#
#allow_per_room_profiles: false

# Whether to show the users on this homeserver in the user directory. Defaults to
# 'true'.
#
#show_users_in_user_directory: false

# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true

# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y

# Retention policy limits. If set, a user won't be able to send a
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
# that's not within this range. This is especially useful in closed federations,
# in which server admins can make sure every federating server applies the same
# rules.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y

# Server admins can define the settings of the background jobs purging the
# events whose lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# whose 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
#
#purge_jobs:
# - shortest_max_lifetime: 1d
#   longest_max_lifetime: 3d
#   interval: 5m
# - shortest_max_lifetime: 3d
#   longest_max_lifetime: 1y
#   interval: 24h
""" % locals()

def read_arguments(self, args):
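The per-job range described above is half-open: a job covers rooms whose 'max_lifetime' is strictly greater than 'shortest_max_lifetime' and lower than or equal to 'longest_max_lifetime', with either bound optional. A back-of-the-envelope sketch of that check (the helper name is an assumption; the dict shape mirrors the retention_purge_jobs entries built above):

    def job_handles_room(job, room_max_lifetime_ms):
        """Return True if this purge job covers a room's 'max_lifetime',
        i.e. shortest_max_lifetime < max_lifetime <= longest_max_lifetime.
        A bound of None means unbounded on that side."""
        shortest = job["shortest_max_lifetime"]
        longest = job["longest_max_lifetime"]
        if shortest is not None and room_max_lifetime_ms <= shortest:
            return False
        if longest is not None and room_max_lifetime_ms > longest:
            return False
        return True

    DAY_MS = 24 * 60 * 60 * 1000
    jobs = [
        {"shortest_max_lifetime": 1 * DAY_MS, "longest_max_lifetime": 3 * DAY_MS},
        {"shortest_max_lifetime": 3 * DAY_MS, "longest_max_lifetime": 365 * DAY_MS},
    ]
    # A room with a 2-day max_lifetime falls to the first job only.
    print([job_handles_room(j, 2 * DAY_MS) for j in jobs])  # [True, False]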
42 synapse/config/third_party_event_rules.py Normal file
@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.module_loader import load_module

from ._base import Config


class ThirdPartyRulesConfig(Config):
def read_config(self, config):
self.third_party_event_rules = None

provider = config.get("third_party_event_rules", None)
if provider is not None:
self.third_party_event_rules = load_module(provider)

def default_config(self, **kwargs):
return """\
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
#  module: "my_custom_project.SuperRulesSet"
#  config:
#    example_option: 'things'
"""
@@ -74,7 +74,7 @@ class TlsConfig(Config):

# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", False,
"federation_verify_certificates", True,
)

# Whitelist of domains to not verify certificates for
@@ -107,7 +107,7 @@ class TlsConfig(Config):
certs = []
for ca_file in custom_ca_list:
logger.debug("Reading custom CA certificate file: %s", ca_file)
content = self.read_file(ca_file)
content = self.read_file(ca_file, "federation_custom_ca_list")

# Parse the CA certificates
try:
@@ -241,12 +241,12 @@ class TlsConfig(Config):
#
#tls_private_key_path: "%(tls_private_key_path)s"

# Whether to verify TLS certificates when sending federation traffic.
# Whether to verify TLS server certificates for outbound federation requests.
#
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
#
#federation_verify_certificates: true
#federation_verify_certificates: false

# Skip federation certificate verification on the following whitelist
# of domains.
@@ -24,6 +24,7 @@ class UserDirectoryConfig(Config):
def read_config(self, config):
self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
self.user_directory_defer_to_id_server = None
user_directory_config = config.get("user_directory", None)
if user_directory_config:
self.user_directory_search_enabled = (
@@ -32,6 +33,9 @@ class UserDirectoryConfig(Config):
self.user_directory_search_all_users = (
user_directory_config.get("search_all_users", False)
)
self.user_directory_defer_to_id_server = (
user_directory_config.get("defer_to_id_server", None)
)

def default_config(self, config_dir_path, server_name, **kwargs):
return """
@@ -43,11 +47,16 @@ class UserDirectoryConfig(Config):
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
#user_directory:
# enabled: true
# search_all_users: false
#
# # If this is set, user search will be delegated to this ID server instead
# # of synapse performing the search itself.
# # This is an experimental API.
# defer_to_id_server: https://id.example.com
"""
@@ -15,10 +15,13 @@

import logging

import idna
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from zope.interface import implementer

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
@@ -56,79 +59,19 @@ class ServerContextFactory(ContextFactory):
return self._context


def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes. This is a
copy of twisted.internet._idna._idnaBytes. For documentation, see the
twisted documentation.
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)


def _tolerateErrors(wrapped):
"""
Wrap up an info_callback for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This is a copy of twisted.internet._sslverify._tolerateErrors. For
documentation, see the twisted documentation.
"""

def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except:  # noqa: E722, taken from the twisted implementation
f = Failure()
logger.exception("Error during info_callback")
connection.get_app_data().failVerification(f)

return infoCallback


@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptionsNoVerify(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
verification left out. For documentation, see the twisted documentation.
"""

def __init__(self, hostname, ctx):
self._ctx = ctx

if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode('ascii')
self._sendSNI = False
else:
self._hostnameBytes = _idnaBytes(hostname)
self._sendSNI = True

ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))

def clientConnectionForTLS(self, tlsProtocol):
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection

def _identityVerifyingInfoCallback(self, connection, where, ret):
# Literal IPv4 and IPv6 addresses are not permitted
# as host names according to the RFCs
if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
connection.set_tlsext_host_name(self._hostnameBytes)


class ClientTLSOptionsFactory(object):
"""Factory for Twisted ClientTLSOptions that are used to make connections
to remote servers for federation."""
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.

Uses one of two OpenSSL context objects for all connections, depending on whether
we should do SSL certificate verification.

get_options decides whether we should do SSL certificate verification and
constructs an SSLClientConnectionCreator factory accordingly.
"""

def __init__(self, config):
self._config = config
self._options_noverify = CertificateOptions()

# Check if we're using a custom list of a CA certificates
trust_root = config.federation_ca_trust_root
@@ -136,11 +79,13 @@ class ClientTLSOptionsFactory(object):
# Use CA root certs provided by OpenSSL
trust_root = platformTrust()

self._options_verify = CertificateOptions(trustRoot=trust_root)
self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
self._verify_ssl_context.set_info_callback(self._context_info_cb)

self._no_verify_ssl_context = CertificateOptions().getContext()
self._no_verify_ssl_context.set_info_callback(self._context_info_cb)

def get_options(self, host):
# Use _makeContext so that we get a fresh OpenSSL CTX each time.

# Check if certificate verification has been enabled
should_verify = self._config.federation_verify_certificates

@@ -151,6 +96,93 @@ class ClientTLSOptionsFactory(object):
should_verify = False
break

if should_verify:
return ClientTLSOptions(host, self._options_verify._makeContext())
return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext())
ssl_context = (
self._verify_ssl_context if should_verify else self._no_verify_ssl_context
)

return SSLClientConnectionCreator(host, ssl_context, should_verify)

@staticmethod
def _context_info_cb(ssl_connection, where, ret):
"""The 'information callback' for our openssl context object."""
# we assume that the app_data on the connection object has been set to
# a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
tls_protocol = ssl_connection.get_app_data()
try:
# ... we further assume that SSLClientConnectionCreator has set the
# '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
tls_protocol._synapse_tls_verifier.verify_context_info_cb(
ssl_connection, where
)
except:  # noqa: E722, taken from the twisted implementation
logger.exception("Error during info_callback")
f = Failure()
tls_protocol.failVerification(f)


@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):
"""Creates openssl connection objects for client connections.

Replaces twisted.internet.ssl.ClientTLSOptions
"""

def __init__(self, hostname, ctx, verify_certs):
self._ctx = ctx
self._verifier = ConnectionVerifier(hostname, verify_certs)

def clientConnectionForTLS(self, tls_protocol):
context = self._ctx
connection = SSL.Connection(context, None)

# as per twisted.internet.ssl.ClientTLSOptions, we set the application
# data to our TLSMemoryBIOProtocol...
connection.set_app_data(tls_protocol)

# ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
# tls_protocol so that the SSL context's info callback has something to
# call to do the cert verification.
setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
return connection


class ConnectionVerifier(object):
"""Set the SNI, and do cert verification

This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
the ssl context's info callback.
"""

# This code is based on twisted.internet.ssl.ClientTLSOptions.

def __init__(self, hostname, verify_certs):
self._verify_certs = verify_certs

if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode("ascii")
self._is_ip_address = True
else:
# twisted's ClientTLSOptions falls back to the stdlib impl here if
# idna is not installed, but points out that it lacks support for
# IDNA2008 (http://bugs.python.org/issue17305).
#
# We can rely on having idna.
self._hostnameBytes = idna.encode(hostname)
self._is_ip_address = False

self._hostnameASCII = self._hostnameBytes.decode("ascii")

def verify_context_info_cb(self, ssl_connection, where):
if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
ssl_connection.set_tlsext_host_name(self._hostnameBytes)

if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs:
try:
if self._is_ip_address:
verify_ip_address(ssl_connection, self._hostnameASCII)
else:
verify_hostname(ssl_connection, self._hostnameASCII)
except VerificationError:
f = Failure()
tls_protocol = ssl_connection.get_app_data()
tls_protocol.failVerification(f)
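The refactoring above replaces per-connection OpenSSL context creation with two long-lived contexts — one verifying, one not — selected per host. As a rough stdlib analogy of that pattern (this is not Synapse's code, which uses pyOpenSSL and Twisted; the whitelist argument here stands in for the federation certificate-verification whitelist):

    import ssl

    # Build both contexts once, mirroring _verify_ssl_context and
    # _no_verify_ssl_context above.
    verify_ctx = ssl.create_default_context()

    no_verify_ctx = ssl.create_default_context()
    no_verify_ctx.check_hostname = False
    no_verify_ctx.verify_mode = ssl.CERT_NONE

    def get_context(host, no_verify_whitelist=()):
        """Pick the shared context for a host, like get_options above.
        Hosts on the whitelist skip certificate verification."""
        should_verify = host not in no_verify_whitelist
        return verify_ctx if should_verify else no_verify_ctx

Reusing two shared contexts avoids rebuilding an OpenSSL CTX (and re-reading the trust root) on every outbound federation connection, which is the design motivation behind the change.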
@@ -31,7 +31,11 @@ logger = logging.getLogger(__name__)
def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
"""Check whether the hash for this PDU matches the contents"""
name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
logger.debug("Expecting hash: %s", encode_base64(expected_hash))
logger.debug(
"Verifying content hash on %s (expecting: %s)",
event.event_id,
encode_base64(expected_hash),
)

# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
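check_event_content_hash compares the hash recorded in the event's 'hashes' against one recomputed from the PDU. A minimal sketch of the recomputation, assuming the Matrix convention of hashing the canonical JSON of the event with 'hashes', 'signatures' and 'unsigned' stripped, and encoding the digest as unpadded base64:

    import hashlib
    import json
    from base64 import b64encode

    def compute_content_hash_sketch(event_dict):
        pdu = dict(event_dict)
        for key in ("hashes", "signatures", "unsigned"):
            pdu.pop(key, None)
        # Canonical JSON: sorted keys, no insignificant whitespace,
        # UTF-8 without ASCII escaping.
        canonical = json.dumps(
            pdu, sort_keys=True, separators=(",", ":"), ensure_ascii=False
        )
        digest = hashlib.sha256(canonical.encode("utf-8")).digest()
        # Matrix uses unpadded base64, hence the stripped '=' padding.
        return "sha256", b64encode(digest).rstrip(b"=").decode("ascii")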
File diff suppressed because it is too large
@@ -76,6 +76,7 @@ class EventBuilder(object):
# someone tries to get them when they don't exist.
_state_key = attr.ib(default=None)
_redacts = attr.ib(default=None)
_origin_server_ts = attr.ib(default=None)

internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))

@@ -142,6 +143,9 @@ class EventBuilder(object):
if self._redacts is not None:
event_dict["redacts"] = self._redacts

if self._origin_server_ts is not None:
event_dict["origin_server_ts"] = self._origin_server_ts

defer.returnValue(
create_local_event_from_event_dict(
clock=self._clock,
@@ -209,6 +213,7 @@ class EventBuilderFactory(object):
content=key_values.get("content", {}),
unsigned=key_values.get("unsigned", {}),
redacts=key_values.get("redacts", None),
origin_server_ts=key_values.get("origin_server_ts", None),
)


@@ -245,7 +250,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key,
event_dict["event_id"] = _create_event_id(clock, hostname)

event_dict["origin"] = hostname
event_dict["origin_server_ts"] = time_now
event_dict.setdefault("origin_server_ts", time_now)

event_dict.setdefault("unsigned", {})
age = event_dict["unsigned"].pop("age", 0)
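The switch from plain assignment to setdefault is what lets a caller-supplied origin_server_ts survive event creation rather than being overwritten with the current time; a two-line illustration of the semantics:

    event_dict = {"origin_server_ts": 1234}
    event_dict.setdefault("origin_server_ts", 5678)  # keeps the caller's value
    assert event_dict["origin_server_ts"] == 1234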
@@ -46,13 +46,26 @@ class SpamChecker(object):

return self.spam_checker.check_event_for_spam(event)

def user_may_invite(self, inviter_userid, invitee_userid, room_id):
def user_may_invite(self, inviter_userid, invitee_userid, third_party_invite,
room_id, new_room, published_room):
"""Checks if a given user may send an invite

If this method returns false, the invite will be rejected.

Args:
userid (string): The sender's user ID
inviter_userid (str)
invitee_userid (str|None): The user ID of the invitee. Is None
if this is a third party invite and the 3PID is not bound to a
user ID.
third_party_invite (dict|None): If a third party invite then is a
dict containing the medium and address of the invitee.
room_id (str)
new_room (bool): Whether the user is being invited to the room as
part of a room creation, if so the invitee would have been
included in the call to `user_may_create_room`.
published_room (bool): Whether the room the user is being invited
to has been published in the local homeserver's public room
directory.

Returns:
bool: True if the user may send an invite, otherwise False
@@ -60,15 +73,25 @@ class SpamChecker(object):
if self.spam_checker is None:
return True

return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id)
return self.spam_checker.user_may_invite(
inviter_userid, invitee_userid, third_party_invite, room_id, new_room,
published_room,
)

def user_may_create_room(self, userid):
def user_may_create_room(self, userid, invite_list, third_party_invite_list,
cloning):
"""Checks if a given user may create a room

If this method returns false, the creation request will be rejected.

Args:
userid (string): The sender's user ID
invite_list (list[str]): List of user IDs that would be invited to
the new room.
third_party_invite_list (list[dict]): List of third party invites
for the new room.
cloning (bool): Whether the user is cloning an existing room, e.g.
upgrading a room.

Returns:
bool: True if the user may create a room, otherwise False
@@ -76,7 +99,9 @@ class SpamChecker(object):
if self.spam_checker is None:
return True

return self.spam_checker.user_may_create_room(userid)
return self.spam_checker.user_may_create_room(
userid, invite_list, third_party_invite_list, cloning,
)

def user_may_create_room_alias(self, userid, room_alias):
"""Checks if a given user may create a room alias
@@ -111,3 +136,21 @@ class SpamChecker(object):
return True

return self.spam_checker.user_may_publish_room(userid, room_id)

def user_may_join_room(self, userid, room_id, is_invited):
"""Checks if a given user is allowed to join a room.

Is not called when the user creates a room.

Args:
userid (str)
room_id (str)
is_invited (bool): Whether the user is invited into the room

Returns:
bool: Whether the user may join the room
"""
if self.spam_checker is None:
return True

return self.spam_checker.user_may_join_room(userid, room_id, is_invited)
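A module wired in via the spam_checker config would now need to accept the widened signatures shown above. A hypothetical skeleton — the class name and the policy decisions in each method are illustrative only, and the plain `__init__(config)` constructor is an assumption about the loading convention:

    class ExampleSpamChecker(object):
        def __init__(self, config):
            self.config = config

        def check_event_for_spam(self, event):
            return False  # never treat events as spam

        def user_may_invite(self, inviter_userid, invitee_userid,
                            third_party_invite, room_id, new_room,
                            published_room):
            # e.g. forbid invites into rooms published in the public directory
            return not published_room

        def user_may_create_room(self, userid, invite_list,
                                 third_party_invite_list, cloning):
            # e.g. cap the number of invites sent alongside room creation
            return len(invite_list) + len(third_party_invite_list) <= 100

        def user_may_create_room_alias(self, userid, room_alias):
            return True

        def user_may_publish_room(self, userid, room_id):
            return True

        def user_may_join_room(self, userid, room_id, is_invited):
            # e.g. only allow joins that follow an invite
            return is_invited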
114 synapse/events/third_party_rules.py Normal file
@@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer


class ThirdPartyEventRules(object):
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.

This is designed to help admins of closed federations with enforcing custom
behaviours.
"""

def __init__(self, hs):
self.third_party_rules = None

self.store = hs.get_datastore()

module = None
config = None
if hs.config.third_party_event_rules:
module, config = hs.config.third_party_event_rules

if module is not None:
self.third_party_rules = module(
config=config,
http_client=hs.get_simple_http_client(),
)

@defer.inlineCallbacks
def check_event_allowed(self, event, context):
"""Check if a provided event should be allowed in the given context.

Args:
event (synapse.events.EventBase): The event to be checked.
context (synapse.events.snapshot.EventContext): The context of the event.

Returns:
defer.Deferred[bool]: True if the event should be allowed, False if not.
"""
if self.third_party_rules is None:
defer.returnValue(True)

prev_state_ids = yield context.get_prev_state_ids(self.store)

# Retrieve the state events from the database.
state_events = {}
for key, event_id in prev_state_ids.items():
state_events[key] = yield self.store.get_event(event_id, allow_none=True)

ret = yield self.third_party_rules.check_event_allowed(event, state_events)
defer.returnValue(ret)

@defer.inlineCallbacks
def on_create_room(self, requester, config, is_requester_admin):
"""Intercept requests to create room to allow, deny or update the
request config.

Args:
requester (Requester)
config (dict): The creation config from the client.
is_requester_admin (bool): If the requester is an admin

Returns:
defer.Deferred
"""

if self.third_party_rules is None:
return

yield self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)

@defer.inlineCallbacks
def check_threepid_can_be_invited(self, medium, address, room_id):
"""Check if a provided 3PID can be invited in the given room.

Args:
medium (str): The 3PID's medium.
address (str): The 3PID's address.
room_id (str): The room we want to invite the threepid to.

Returns:
defer.Deferred[bool], True if the 3PID can be invited, False if not.
"""

if self.third_party_rules is None:
defer.returnValue(True)

state_ids = yield self.store.get_filtered_current_state_ids(room_id)
room_state_events = yield self.store.get_events(state_ids.values())

state_events = {}
for key, event_id in state_ids.items():
state_events[key] = room_state_events[event_id]

ret = yield self.third_party_rules.check_threepid_can_be_invited(
medium, address, state_events,
)
defer.returnValue(ret)
@@ -330,12 +330,13 @@ class EventClientSerializer(object):
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def serialize_event(self, event, time_now, **kwargs):
|
||||
def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
|
||||
"""Serializes a single event.
|
||||
|
||||
Args:
|
||||
event (EventBase)
|
||||
time_now (int): The current time in milliseconds
|
||||
bundle_aggregations (bool): Whether to bundle in related events
|
||||
**kwargs: Arguments to pass to `serialize_event`
|
||||
|
||||
Returns:
|
||||
@@ -350,7 +351,7 @@ class EventClientSerializer(object):
|
||||
|
||||
# If MSC1849 is enabled then we need to look if thre are any relations
|
||||
# we need to bundle in with the event
|
||||
if self.experimental_msc1849_support_enabled:
|
||||
if self.experimental_msc1849_support_enabled and bundle_aggregations:
|
||||
annotations = yield self.store.get_aggregation_groups_for_event(
|
||||
event_id,
|
||||
)
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from six import string_types
|
||||
from six import integer_types, string_types
|
||||
|
||||
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
@@ -22,11 +22,12 @@ from synapse.types import EventID, RoomID, UserID
|
||||
|
||||
|
||||
class EventValidator(object):
|
||||
def validate_new(self, event):
|
||||
def validate_new(self, event, config):
|
||||
"""Validates the event has roughly the right format
|
||||
|
||||
Args:
|
||||
event (FrozenEvent)
|
||||
event (FrozenEvent): The event to validate.
|
||||
config (Config): The homeserver's configuration.
|
||||
"""
|
||||
self.validate_builder(event)
|
||||
|
||||
@@ -67,6 +68,99 @@ class EventValidator(object):
|
||||
Codes.INVALID_PARAM,
|
||||
)
|
||||
|
||||
if event.type == EventTypes.Retention:
|
||||
self._validate_retention(event, config)
|
||||
|
||||
def _validate_retention(self, event, config):
|
||||
"""Checks that an event that defines the retention policy for a room respects the
|
||||
boundaries imposed by the server's administrator.
|
||||
|
||||
Args:
|
||||
event (FrozenEvent): The event to validate.
|
||||
config (Config): The homeserver's configuration.
|
||||
"""
|
||||
min_lifetime = event.content.get("min_lifetime")
|
||||
max_lifetime = event.content.get("max_lifetime")
|
||||
|
||||
if min_lifetime is not None:
|
||||
if not isinstance(min_lifetime, integer_types):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg="'min_lifetime' must be an integer",
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if (
|
||||
config.retention_allowed_lifetime_min is not None
|
||||
and min_lifetime < config.retention_allowed_lifetime_min
|
||||
):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=(
|
||||
"'min_lifetime' can't be lower than the minimum allowed"
|
||||
" value enforced by the server's administrator"
|
||||
),
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if (
|
||||
config.retention_allowed_lifetime_max is not None
|
||||
and min_lifetime > config.retention_allowed_lifetime_max
|
||||
):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=(
|
||||
"'min_lifetime' can't be greater than the maximum allowed"
|
||||
" value enforced by the server's administrator"
|
||||
),
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if max_lifetime is not None:
|
||||
if not isinstance(max_lifetime, integer_types):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg="'max_lifetime' must be an integer",
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if (
|
||||
config.retention_allowed_lifetime_min is not None
|
||||
and max_lifetime < config.retention_allowed_lifetime_min
|
||||
):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=(
|
||||
"'max_lifetime' can't be lower than the minimum allowed value"
|
||||
" enforced by the server's administrator"
|
||||
),
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if (
|
||||
config.retention_allowed_lifetime_max is not None
|
||||
and max_lifetime > config.retention_allowed_lifetime_max
|
||||
):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=(
|
||||
"'max_lifetime' can't be greater than the maximum allowed"
|
||||
" value enforced by the server's administrator"
|
||||
),
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
if (
|
||||
min_lifetime is not None
|
||||
and max_lifetime is not None
|
||||
and min_lifetime > max_lifetime
|
||||
):
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg="'min_lifetime' can't be greater than 'max_lifetime",
|
||||
errcode=Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
def validate_builder(self, event):
|
||||
"""Validates that the builder/event has roughly the right format. Only
|
||||
checks values that we expect a proto event to have, rather than all the
|
||||
|
||||
@@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
|
||||
the signatures are valid, or fail (with a SynapseError) if not.
|
||||
"""
|
||||
|
||||
# (currently this is written assuming the v1 room structure; we'll probably want a
|
||||
# separate function for checking v2 rooms)
|
||||
|
||||
# we want to check that the event is signed by:
|
||||
#
|
||||
# (a) the sender's server
|
||||
@@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
|
||||
for p in pdus
|
||||
]
|
||||
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
if not v:
|
||||
raise RuntimeError("Unrecognized room version %s" % (room_version,))
|
||||
|
||||
# First we check that the sender event is signed by the sender's domain
|
||||
# (except if its a 3pid invite, in which case it may be sent by any server)
|
||||
pdus_to_check_sender = [
|
||||
@@ -264,10 +265,17 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
|
||||
if not _is_invite_via_3pid(p.pdu)
|
||||
]
|
||||
|
||||
more_deferreds = keyring.verify_json_objects_for_server([
|
||||
(p.sender_domain, p.redacted_pdu_json)
|
||||
for p in pdus_to_check_sender
|
||||
])
|
||||
more_deferreds = keyring.verify_json_objects_for_server(
|
||||
[
|
||||
(
|
||||
p.sender_domain,
|
||||
p.redacted_pdu_json,
|
||||
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
|
||||
p.pdu.event_id,
|
||||
)
|
||||
for p in pdus_to_check_sender
|
||||
]
|
||||
)
|
||||
|
||||
def sender_err(e, pdu_to_check):
|
||||
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
|
||||
@@ -287,20 +295,23 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
|
||||
# event id's domain (normally only the case for joins/leaves), and add additional
|
||||
# checks. Only do this if the room version has a concept of event ID domain
|
||||
# (ie, the room version uses old-style non-hash event IDs).
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
if not v:
|
||||
raise RuntimeError("Unrecognized room version %s" % (room_version,))
|
||||
|
||||
if v.event_format == EventFormatVersions.V1:
|
||||
pdus_to_check_event_id = [
|
||||
p for p in pdus_to_check
|
||||
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
|
||||
]
|
||||
|
||||
more_deferreds = keyring.verify_json_objects_for_server([
|
||||
(get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
|
||||
for p in pdus_to_check_event_id
|
||||
])
|
||||
more_deferreds = keyring.verify_json_objects_for_server(
|
||||
[
|
||||
(
|
||||
get_domain_from_id(p.pdu.event_id),
|
||||
p.redacted_pdu_json,
|
||||
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
|
||||
p.pdu.event_id,
|
||||
)
|
||||
for p in pdus_to_check_event_id
|
||||
]
|
||||
)
|
||||
|
||||
def event_err(e, pdu_to_check):
|
||||
errmsg = (
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
import copy
|
||||
import itertools
|
||||
import logging
|
||||
import random
|
||||
|
||||
from six.moves import range
|
||||
|
||||
@@ -233,7 +232,8 @@ class FederationClient(FederationBase):
|
||||
moving to the next destination. None indicates no timeout.
|
||||
|
||||
Returns:
|
||||
Deferred: Results in the requested PDU.
|
||||
Deferred: Results in the requested PDU, or None if we were unable to find
|
||||
it.
|
||||
"""
|
||||
|
||||
# TODO: Rate limit the number of times we try and get the same event.
|
||||
@@ -258,7 +258,12 @@ class FederationClient(FederationBase):
|
||||
destination, event_id, timeout=timeout,
|
||||
)
|
||||
|
||||
logger.debug("transaction_data %r", transaction_data)
|
||||
logger.debug(
|
||||
"retrieved event id %s from %s: %r",
|
||||
event_id,
|
||||
destination,
|
||||
transaction_data,
|
||||
)
|
||||
|
||||
pdu_list = [
|
||||
event_from_pdu_json(p, format_ver, outlier=outlier)
|
||||
@@ -280,6 +285,7 @@ class FederationClient(FederationBase):
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
except NotRetryingDestination as e:
|
||||
logger.info(str(e))
|
||||
continue
|
||||
@@ -326,12 +332,16 @@ class FederationClient(FederationBase):
|
||||
state_event_ids = result["pdu_ids"]
|
||||
auth_event_ids = result.get("auth_chain_ids", [])
|
||||
|
||||
fetched_events, failed_to_fetch = yield self.get_events(
|
||||
[destination], room_id, set(state_event_ids + auth_event_ids)
|
||||
fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
|
||||
destination, room_id, set(state_event_ids + auth_event_ids)
|
||||
)
|
||||
|
||||
if failed_to_fetch:
|
||||
logger.warn("Failed to get %r", failed_to_fetch)
|
||||
logger.warning(
|
||||
"Failed to fetch missing state/auth events for %s: %s",
|
||||
room_id,
|
||||
failed_to_fetch
|
||||
)
|
||||
|
||||
event_map = {
|
||||
ev.event_id: ev for ev in fetched_events
|
||||
@@ -397,27 +407,20 @@ class FederationClient(FederationBase):
|
||||
defer.returnValue((signed_pdus, signed_auth))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_events(self, destinations, room_id, event_ids, return_local=True):
|
||||
"""Fetch events from some remote destinations, checking if we already
|
||||
have them.
|
||||
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
|
||||
"""Fetch events from a remote destination, checking if we already have them.
|
||||
|
||||
Args:
|
||||
destinations (list)
|
||||
destination (str)
|
||||
room_id (str)
|
||||
event_ids (list)
|
||||
return_local (bool): Whether to include events we already have in
|
||||
the DB in the returned list of events
|
||||
|
||||
Returns:
|
||||
Deferred: A deferred resolving to a 2-tuple where the first is a list of
|
||||
events and the second is a list of event ids that we failed to fetch.
|
||||
"""
|
||||
if return_local:
|
||||
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
|
||||
signed_events = list(seen_events.values())
|
||||
else:
|
||||
seen_events = yield self.store.have_seen_events(event_ids)
|
||||
signed_events = []
|
||||
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
|
||||
signed_events = list(seen_events.values())
|
||||
|
||||
failed_to_fetch = set()
|
||||
|
||||
@@ -428,10 +431,11 @@ class FederationClient(FederationBase):
|
||||
if not missing_events:
|
||||
defer.returnValue((signed_events, failed_to_fetch))
|
||||
|
||||
def random_server_list():
|
||||
srvs = list(destinations)
|
||||
random.shuffle(srvs)
|
||||
return srvs
|
||||
logger.debug(
|
||||
"Fetching unknown state/auth events %s for room %s",
|
||||
missing_events,
|
||||
event_ids,
|
||||
)
|
||||
|
||||
room_version = yield self.store.get_room_version(room_id)
|
||||
|
||||
@@ -443,7 +447,7 @@ class FederationClient(FederationBase):
|
||||
deferreds = [
|
||||
run_in_background(
|
||||
self.get_pdu,
|
||||
destinations=random_server_list(),
|
||||
destinations=[destination],
|
||||
event_id=e_id,
|
||||
room_version=room_version,
|
||||
)
|
||||
|
||||
@@ -349,9 +349,10 @@ class PerDestinationQueue(object):
|
||||
@defer.inlineCallbacks
|
||||
def _get_new_device_messages(self, limit):
|
||||
last_device_list = self._last_device_list_stream_id
|
||||
# Will return at most 20 entries
|
||||
|
||||
# Retrieve list of new device updates to send to the destination
|
||||
now_stream_id, results = yield self._store.get_devices_by_remote(
|
||||
self._destination, last_device_list
|
||||
self._destination, last_device_list, limit=limit,
|
||||
)
|
||||
edus = [
|
||||
Edu(
|
||||
|
||||
@@ -23,7 +23,11 @@ from twisted.internet import defer
|
||||
import synapse
|
||||
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
|
||||
from synapse.api.room_versions import RoomVersions
|
||||
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
|
||||
from synapse.api.urls import (
|
||||
FEDERATION_UNSTABLE_PREFIX,
|
||||
FEDERATION_V1_PREFIX,
|
||||
FEDERATION_V2_PREFIX,
|
||||
)
|
||||
from synapse.http.endpoint import parse_and_validate_server_name
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.servlet import (
|
||||
@@ -90,6 +94,7 @@ class NoAuthenticationError(AuthenticationError):
|
||||
|
||||
class Authenticator(object):
|
||||
def __init__(self, hs):
|
||||
self._clock = hs.get_clock()
|
||||
self.keyring = hs.get_keyring()
|
||||
self.server_name = hs.hostname
|
||||
self.store = hs.get_datastore()
|
||||
@@ -98,6 +103,7 @@ class Authenticator(object):
|
||||
# A method just so we can pass 'self' as the authenticator to the Servlets
|
||||
@defer.inlineCallbacks
|
||||
def authenticate_request(self, request, content):
|
||||
now = self._clock.time_msec()
|
||||
json_request = {
|
||||
"method": request.method.decode('ascii'),
|
||||
"uri": request.uri.decode('ascii'),
|
||||
@@ -134,7 +140,9 @@ class Authenticator(object):
|
||||
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
|
||||
)
|
||||
|
||||
yield self.keyring.verify_json_for_server(origin, json_request)
|
||||
yield self.keyring.verify_json_for_server(
|
||||
origin, json_request, now, "Incoming request"
|
||||
)
|
||||
|
||||
logger.info("Request from %s", origin)
|
||||
request.authenticated_entity = origin
|
||||
@@ -712,15 +720,15 @@ class PublicRoomList(BaseFederationServlet):
|
||||
|
||||
PATH = "/publicRooms"
|
||||
|
||||
def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access):
|
||||
def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access):
|
||||
super(PublicRoomList, self).__init__(
|
||||
handler, authenticator, ratelimiter, server_name,
|
||||
)
|
||||
self.deny_access = deny_access
|
||||
self.allow_access = allow_access
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query):
|
||||
if self.deny_access:
|
||||
if not self.allow_access:
|
||||
raise FederationDeniedError(origin)
|
||||
|
||||
limit = parse_integer_from_args(query, "limit", 0)
|
||||
@@ -1304,6 +1312,30 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class RoomComplexityServlet(BaseFederationServlet):
|
||||
"""
|
||||
Indicates to other servers how complex (and therefore likely
|
||||
resource-intensive) a public room this server knows about is.
|
||||
"""
|
||||
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
|
||||
PREFIX = FEDERATION_UNSTABLE_PREFIX
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, room_id):
|
||||
|
||||
store = self.handler.hs.get_datastore()
|
||||
|
||||
is_public = yield store.is_room_world_readable_or_publicly_joinable(
|
||||
room_id
|
||||
)
|
||||
|
||||
if not is_public:
|
||||
raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
|
||||
|
||||
complexity = yield store.get_room_complexity(room_id)
|
||||
defer.returnValue((200, complexity))
|
||||
|
||||
|
||||
FEDERATION_SERVLET_CLASSES = (
|
||||
FederationSendServlet,
|
||||
FederationEventServlet,
|
||||
@@ -1327,6 +1359,7 @@ FEDERATION_SERVLET_CLASSES = (
|
||||
FederationThirdPartyInviteExchangeServlet,
|
||||
On3pidBindServlet,
|
||||
FederationVersionServlet,
|
||||
RoomComplexityServlet,
|
||||
)
|
||||
|
||||
OPENID_SERVLET_CLASSES = (
|
||||
@@ -1422,7 +1455,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
deny_access=hs.config.restrict_public_rooms_to_local_users,
|
||||
allow_access=hs.config.allow_public_rooms_over_federation,
|
||||
).register(resource)
|
||||
|
||||
if "group_server" in servlet_groups:
|
||||
|
||||
@@ -97,10 +97,13 @@ class GroupAttestationSigning(object):
|
||||
|
||||
# TODO: We also want to check that *new* attestations that people give
|
||||
# us to store are valid for at least a little while.
|
||||
if valid_until_ms < self.clock.time_msec():
|
||||
now = self.clock.time_msec()
|
||||
if valid_until_ms < now:
|
||||
raise SynapseError(400, "Attestation expired")
|
||||
|
||||
yield self.keyring.verify_json_for_server(server_name, attestation)
|
||||
yield self.keyring.verify_json_for_server(
|
||||
server_name, attestation, now, "Group attestation"
|
||||
)
|
||||
|
||||
def create_attestation(self, group_id, user_id):
|
||||
"""Create an attestation for the group_id and user_id with default
|
||||
|
||||
@@ -110,6 +110,9 @@ class AccountValidityHandler(object):
|
||||
# Stop right here if the user doesn't have at least one email address.
|
||||
# In this case, they will have to ask their server admin to renew their
|
||||
# account manually.
|
||||
# We don't need to do a specific check to make sure the account isn't
|
||||
# deactivated, as a deactivated account isn't supposed to have any
|
||||
# email address attached to it.
|
||||
if not addresses:
|
||||
return
|
||||
|
||||
@@ -220,11 +223,19 @@ class AccountValidityHandler(object):
|
||||
|
||||
Args:
|
||||
renewal_token (str): Token sent with the renewal request.
|
||||
Returns:
|
||||
bool: Whether the provided token is valid.
|
||||
"""
|
||||
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
|
||||
try:
|
||||
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
|
||||
except StoreError:
|
||||
defer.returnValue(False)
|
||||
|
||||
logger.debug("Renewing an account for user %s", user_id)
|
||||
yield self.renew_account_for_user(user_id)
|
||||
|
||||
defer.returnValue(True)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
|
||||
"""Renews the account attached to a given user by pushing back the
|
||||
|
||||
@@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
|
||||
defer.returnValue(params)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_auth(self, flows, clientdict, clientip):
|
||||
def check_auth(self, flows, clientdict, clientip, password_servlet=False):
|
||||
"""
|
||||
Takes a dictionary sent by the client in the login / registration
|
||||
protocol and handles the User-Interactive Auth flow.
|
||||
@@ -186,6 +186,16 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
clientip (str): The IP address of the client.
|
||||
|
||||
password_servlet (bool): Whether the request originated from
|
||||
PasswordRestServlet.
|
||||
XXX: This is a temporary hack to distinguish between checking
|
||||
for threepid validations locally (in the case of password
|
||||
resets) and using the identity server (in the case of binding
|
||||
a 3PID during registration). Once we start using the
|
||||
homeserver for both tasks, this distinction will no longer be
|
||||
necessary.
|
||||
|
||||
|
||||
Returns:
|
||||
defer.Deferred[dict, dict, str]: a deferred tuple of
|
||||
(creds, params, session_id).
|
||||
@@ -241,7 +251,9 @@ class AuthHandler(BaseHandler):
|
||||
if 'type' in authdict:
|
||||
login_type = authdict['type']
|
||||
try:
|
||||
result = yield self._check_auth_dict(authdict, clientip)
|
||||
result = yield self._check_auth_dict(
|
||||
authdict, clientip, password_servlet=password_servlet,
|
||||
)
|
||||
if result:
|
||||
creds[login_type] = result
|
||||
self._save_session(session)
|
||||
@@ -351,7 +363,7 @@ class AuthHandler(BaseHandler):
|
||||
return sess.setdefault('serverdict', {}).get(key, default)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_auth_dict(self, authdict, clientip):
|
||||
def _check_auth_dict(self, authdict, clientip, password_servlet=False):
|
||||
"""Attempt to validate the auth dict provided by a client
|
||||
|
||||
Args:
|
||||
@@ -369,7 +381,13 @@ class AuthHandler(BaseHandler):
|
||||
login_type = authdict['type']
|
||||
checker = self.checkers.get(login_type)
|
||||
if checker is not None:
|
||||
res = yield checker(authdict, clientip)
|
||||
# XXX: Temporary workaround for having Synapse handle password resets
|
||||
# See AuthHandler.check_auth for further details
|
||||
res = yield checker(
|
||||
authdict,
|
||||
clientip=clientip,
|
||||
password_servlet=password_servlet,
|
||||
)
|
||||
defer.returnValue(res)
|
||||
|
||||
# build a v1-login-style dict out of the authdict and fall back to the
|
||||
@@ -383,7 +401,7 @@ class AuthHandler(BaseHandler):
|
||||
defer.returnValue(canonical_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_recaptcha(self, authdict, clientip):
|
||||
def _check_recaptcha(self, authdict, clientip, **kwargs):
|
||||
try:
|
||||
user_response = authdict["response"]
|
||||
except KeyError:
|
||||
@@ -429,20 +447,20 @@ class AuthHandler(BaseHandler):
|
||||
defer.returnValue(True)
|
||||
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
|
||||
|
||||
def _check_email_identity(self, authdict, _):
|
||||
return self._check_threepid('email', authdict)
|
||||
def _check_email_identity(self, authdict, **kwargs):
|
||||
return self._check_threepid('email', authdict, **kwargs)
|
||||
|
||||
def _check_msisdn(self, authdict, _):
|
||||
def _check_msisdn(self, authdict, **kwargs):
|
||||
return self._check_threepid('msisdn', authdict)
|
||||
|
||||
def _check_dummy_auth(self, authdict, _):
|
||||
def _check_dummy_auth(self, authdict, **kwargs):
|
||||
return defer.succeed(True)
|
||||
|
||||
def _check_terms_auth(self, authdict, _):
|
||||
def _check_terms_auth(self, authdict, **kwargs):
|
||||
return defer.succeed(True)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_threepid(self, medium, authdict):
|
||||
def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
|
||||
if 'threepid_creds' not in authdict:
|
||||
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
|
||||
|
||||
@@ -451,7 +469,30 @@ class AuthHandler(BaseHandler):
|
||||
identity_handler = self.hs.get_handlers().identity_handler
|
||||
|
||||
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
|
||||
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
|
||||
if (
|
||||
not password_servlet
|
||||
or self.hs.config.email_password_reset_behaviour == "remote"
|
||||
):
|
||||
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
|
||||
elif self.hs.config.email_password_reset_behaviour == "local":
|
||||
row = yield self.store.get_threepid_validation_session(
|
||||
medium,
|
||||
threepid_creds["client_secret"],
|
||||
sid=threepid_creds["sid"],
|
||||
validated=True,
|
||||
)
|
||||
|
||||
threepid = {
|
||||
"medium": row["medium"],
|
||||
"address": row["address"],
|
||||
"validated_at": row["validated_at"],
|
||||
} if row else None
|
||||
|
||||
if row:
|
||||
# Valid threepid returned, delete from the db
|
||||
yield self.store.delete_threepid_session(threepid_creds["sid"])
|
||||
else:
|
||||
raise SynapseError(400, "Password resets are not enabled on this homeserver")
|
||||
|
||||
if not threepid:
|
||||
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2017, 2018 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -33,6 +34,7 @@ class DeactivateAccountHandler(BaseHandler):
|
||||
self._device_handler = hs.get_device_handler()
|
||||
self._room_member_handler = hs.get_room_member_handler()
|
||||
self._identity_handler = hs.get_handlers().identity_handler
|
||||
self._profile_handler = hs.get_profile_handler()
|
||||
self.user_directory_handler = hs.get_user_directory_handler()
|
||||
|
||||
# Flag that indicates whether the process to part users from rooms is running
|
||||
@@ -42,6 +44,8 @@ class DeactivateAccountHandler(BaseHandler):
|
||||
# it left off (if it has work left to do).
|
||||
hs.get_reactor().callWhenRunning(self._start_user_parting)
|
||||
|
||||
self._account_validity_enabled = hs.config.account_validity.enabled
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def deactivate_account(self, user_id, erase_data, id_server=None):
|
||||
"""Deactivate a user's account
|
||||
@@ -98,6 +102,9 @@ class DeactivateAccountHandler(BaseHandler):
|
||||
|
||||
yield self.store.user_set_password_hash(user_id, None)
|
||||
|
||||
user = UserID.from_string(user_id)
|
||||
yield self._profile_handler.set_active(user, False, False)
|
||||
|
||||
# Add the user to a table of users pending deactivation (ie.
|
||||
# removal from all the rooms they're a member of)
|
||||
yield self.store.add_user_pending_deactivation(user_id)
|
||||
@@ -114,6 +121,13 @@ class DeactivateAccountHandler(BaseHandler):
|
||||
# parts users from rooms (if it isn't already running)
|
||||
self._start_user_parting()
|
||||
|
||||
# Remove all information on the user from the account_validity table.
|
||||
if self._account_validity_enabled:
|
||||
yield self.store.delete_account_validity_for_user(user_id)
|
||||
|
||||
# Mark the user as deactivated.
|
||||
yield self.store.set_user_deactivated_status(user_id, True)
|
||||
|
||||
defer.returnValue(identity_server_supports_unbinding)
|
||||
|
||||
def _start_user_parting(self):
|
||||
|
||||
@@ -568,6 +568,12 @@ class DeviceListEduUpdater(object):
|
||||
stream_id = result["stream_id"]
|
||||
devices = result["devices"]
|
||||
|
||||
for device in devices:
|
||||
logger.debug(
|
||||
"Handling resync update %r/%r, ID: %r",
|
||||
user_id, device["device_id"], stream_id,
|
||||
)
|
||||
|
||||
# If the remote server has more than ~1000 devices for this user
|
||||
# we assume that something is going horribly wrong (e.g. a bot
|
||||
# that logs in and creates a new device every time it tries to
|
||||
|
||||
@@ -122,6 +122,9 @@ class EventStreamHandler(BaseHandler):
|
||||
|
||||
chunks = yield self._event_serializer.serialize_events(
|
||||
events, time_now, as_client_event=as_client_event,
|
||||
# We don't bundle "live" events, as otherwise clients
|
||||
# will end up double counting annotations.
|
||||
bundle_aggregations=False,
|
||||
)
|
||||
|
||||
chunk = {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2017-2018 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -33,8 +34,10 @@ from synapse.api.constants import EventTypes, Membership, RejectedReason
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
CodeMessageException,
|
||||
Codes,
|
||||
FederationDeniedError,
|
||||
FederationError,
|
||||
RequestSendFailed,
|
||||
StoreError,
|
||||
SynapseError,
|
||||
)
|
||||
@@ -126,6 +129,8 @@ class FederationHandler(BaseHandler):
|
||||
self.room_queues = {}
|
||||
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
|
||||
|
||||
self.third_party_event_rules = hs.get_third_party_event_rules()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_receive_pdu(
|
||||
self, origin, pdu, sent_to_us_directly=False,
|
||||
@@ -1257,6 +1262,15 @@ class FederationHandler(BaseHandler):
|
||||
logger.warn("Failed to create join %r because %s", event, e)
|
||||
raise e
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.info("Creation of join %s forbidden by third-party rules", event)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
# The remote hasn't signed it yet, obviously. We'll do the full checks
|
||||
# when we get the event back in `on_send_join_request`
|
||||
yield self.auth.check_from_context(
|
||||
@@ -1299,6 +1313,15 @@ class FederationHandler(BaseHandler):
|
||||
origin, event
|
||||
)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.info("Sending of join %s forbidden by third-party rules", event)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"on_send_join_request: After _handle_new_event: %s, sigs: %s",
|
||||
event.event_id,
|
||||
@@ -1340,8 +1363,12 @@ class FederationHandler(BaseHandler):
|
||||
if self.hs.config.block_non_admin_invites:
|
||||
raise SynapseError(403, "This server does not accept room invites")
|
||||
|
||||
is_published = yield self.store.is_room_published(event.room_id)
|
||||
|
||||
if not self.spam_checker.user_may_invite(
|
||||
event.sender, event.state_key, event.room_id,
|
||||
event.sender, event.state_key, None,
|
||||
room_id=event.room_id, new_room=False,
|
||||
published_room=is_published,
|
||||
):
|
||||
raise SynapseError(
|
||||
403, "This user is not permitted to send invites to this server/user"
|
||||
@@ -1457,6 +1484,15 @@ class FederationHandler(BaseHandler):
|
||||
builder=builder,
|
||||
)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.warning("Creation of leave %s forbidden by third-party rules", event)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
try:
|
||||
# The remote hasn't signed it yet, obviously. We'll do the full checks
|
||||
# when we get the event back in `on_send_leave_request`
|
||||
@@ -1483,10 +1519,19 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
event.internal_metadata.outlier = False
|
||||
|
||||
yield self._handle_new_event(
|
||||
context = yield self._handle_new_event(
|
||||
origin, event
|
||||
)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.info("Sending of leave %s forbidden by third-party rules", event)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"on_send_leave_request: After _handle_new_event: %s, sigs: %s",
|
||||
event.event_id,
|
||||
@@ -2013,15 +2058,65 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
Args:
|
||||
origin (str):
|
||||
event (synapse.events.FrozenEvent):
|
||||
event (synapse.events.EventBase):
|
||||
context (synapse.events.snapshot.EventContext):
|
||||
auth_events (dict[(str, str)->str]):
|
||||
auth_events (dict[(str, str)->synapse.events.EventBase]):
|
||||
Map from (event_type, state_key) to event
|
||||
|
||||
What we expect the event's auth_events to be, based on the event's
|
||||
position in the dag. I think? maybe??
|
||||
|
||||
Also NB that this function adds entries to it.
|
||||
Returns:
|
||||
defer.Deferred[None]
|
||||
"""
|
||||
room_version = yield self.store.get_room_version(event.room_id)
|
||||
|
||||
try:
|
||||
yield self._update_auth_events_and_context_for_auth(
|
||||
origin, event, context, auth_events
|
||||
)
|
||||
except Exception:
|
||||
# We don't really mind if the above fails, so lets not fail
|
||||
# processing if it does. However, it really shouldn't fail so
|
||||
# let's still log as an exception since we'll still want to fix
|
||||
# any bugs.
|
||||
logger.exception(
|
||||
"Failed to double check auth events for %s with remote. "
|
||||
"Ignoring failure and continuing processing of event.",
|
||||
event.event_id,
|
||||
)
|
||||
|
||||
try:
|
||||
self.auth.check(room_version, event, auth_events=auth_events)
|
||||
except AuthError as e:
|
||||
logger.warn("Failed auth resolution for %r because %s", event, e)
|
||||
raise e
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _update_auth_events_and_context_for_auth(
|
||||
self, origin, event, context, auth_events
|
||||
):
|
||||
"""Helper for do_auth. See there for docs.
|
||||
|
||||
Checks whether a given event has the expected auth events. If it
|
||||
doesn't then we talk to the remote server to compare state to see if
|
||||
we can come to a consensus (e.g. if one server missed some valid
|
||||
state).
|
||||
|
||||
This attempts to resovle any potential divergence of state between
|
||||
servers, but is not essential and so failures should not block further
|
||||
processing of the event.
|
||||
|
||||
Args:
|
||||
origin (str):
|
||||
event (synapse.events.EventBase):
|
||||
context (synapse.events.snapshot.EventContext):
|
||||
auth_events (dict[(str, str)->synapse.events.EventBase]):
|
||||
|
||||
Returns:
|
||||
defer.Deferred[None]
|
||||
"""
|
||||
# Check if we have all the auth events.
|
||||
current_state = set(e.event_id for e in auth_events.values())
|
||||
event_auth_events = set(event.auth_event_ids())
|
||||
|
||||
if event.is_state():
|
||||
@@ -2029,11 +2124,21 @@ class FederationHandler(BaseHandler):
|
||||
else:
|
||||
event_key = None
|
||||
|
||||
if event_auth_events - current_state:
|
||||
# if the event's auth_events refers to events which are not in our
|
||||
# calculated auth_events, we need to fetch those events from somewhere.
|
||||
#
|
||||
# we start by fetching them from the store, and then try calling /event_auth/.
|
||||
missing_auth = event_auth_events.difference(
|
||||
e.event_id for e in auth_events.values()
|
||||
)
|
||||
|
||||
if missing_auth:
|
||||
# TODO: can we use store.have_seen_events here instead?
|
||||
have_events = yield self.store.get_seen_events_with_rejections(
|
||||
event_auth_events - current_state
|
||||
missing_auth
|
||||
)
|
||||
logger.debug("Got events %s from store", have_events)
|
||||
missing_auth.difference_update(have_events.keys())
|
||||
else:
|
||||
have_events = {}
|
||||
|
||||
@@ -2042,17 +2147,22 @@ class FederationHandler(BaseHandler):
|
||||
for e in auth_events.values()
|
||||
})
|
||||
|
||||
seen_events = set(have_events.keys())
|
||||
|
||||
missing_auth = event_auth_events - seen_events - current_state
|
||||
|
||||
if missing_auth:
|
||||
logger.info("Missing auth: %s", missing_auth)
|
||||
# If we don't have all the auth events, we need to get them.
|
||||
logger.info(
|
||||
"auth_events contains unknown events: %s",
|
||||
missing_auth,
|
||||
)
|
||||
try:
|
||||
remote_auth_chain = yield self.federation_client.get_event_auth(
|
||||
origin, event.room_id, event.event_id
|
||||
)
|
||||
try:
|
||||
remote_auth_chain = yield self.federation_client.get_event_auth(
|
||||
origin, event.room_id, event.event_id
|
||||
)
|
||||
except RequestSendFailed as e:
|
||||
# The other side isn't around or doesn't implement the
|
||||
# endpoint, so lets just bail out.
|
||||
logger.info("Failed to get event auth from remote: %s", e)
|
||||
return
|
||||
|
||||
seen_remotes = yield self.store.have_seen_events(
|
||||
[e.event_id for e in remote_auth_chain]
|
||||
@@ -2089,145 +2199,174 @@ class FederationHandler(BaseHandler):
|
||||
have_events = yield self.store.get_seen_events_with_rejections(
|
||||
event.auth_event_ids()
|
||||
)
|
||||
seen_events = set(have_events.keys())
|
||||
except Exception:
|
||||
# FIXME:
|
||||
logger.exception("Failed to get auth chain")
|
||||
|
||||
if event.internal_metadata.is_outlier():
|
||||
logger.info("Skipping auth_event fetch for outlier")
|
||||
return
|
||||
|
||||
# FIXME: Assumes we have and stored all the state for all the
|
||||
# prev_events
|
||||
current_state = set(e.event_id for e in auth_events.values())
|
||||
different_auth = event_auth_events - current_state
|
||||
different_auth = event_auth_events.difference(
|
||||
e.event_id for e in auth_events.values()
|
||||
)
|
||||
|
||||
if not different_auth:
|
||||
return
|
||||
|
||||
logger.info(
|
||||
"auth_events refers to events which are not in our calculated auth "
|
||||
"chain: %s",
|
||||
different_auth,
|
||||
)
|
||||
|
||||
room_version = yield self.store.get_room_version(event.room_id)
|
||||
|
||||
if different_auth and not event.internal_metadata.is_outlier():
|
||||
# Do auth conflict res.
|
||||
logger.info("Different auth: %s", different_auth)
|
||||
|
||||
different_events = yield logcontext.make_deferred_yieldable(
|
||||
defer.gatherResults([
|
||||
logcontext.run_in_background(
|
||||
self.store.get_event,
|
||||
d,
|
||||
allow_none=True,
|
||||
allow_rejected=False,
|
||||
)
|
||||
for d in different_auth
|
||||
if d in have_events and not have_events[d]
|
||||
], consumeErrors=True)
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
if different_events:
|
||||
local_view = dict(auth_events)
|
||||
remote_view = dict(auth_events)
|
||||
remote_view.update({
|
||||
(d.type, d.state_key): d for d in different_events if d
|
||||
})
|
||||
|
||||
new_state = yield self.state_handler.resolve_events(
|
||||
room_version,
|
||||
[list(local_view.values()), list(remote_view.values())],
|
||||
event
|
||||
different_events = yield logcontext.make_deferred_yieldable(
|
||||
defer.gatherResults([
|
||||
logcontext.run_in_background(
|
||||
self.store.get_event,
|
||||
d,
|
||||
allow_none=True,
|
||||
allow_rejected=False,
|
||||
)
|
||||
for d in different_auth
|
||||
if d in have_events and not have_events[d]
|
||||
], consumeErrors=True)
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
auth_events.update(new_state)
|
||||
if different_events:
|
||||
local_view = dict(auth_events)
|
||||
remote_view = dict(auth_events)
|
||||
remote_view.update({
|
||||
(d.type, d.state_key): d for d in different_events if d
|
||||
})
|
||||
|
||||
current_state = set(e.event_id for e in auth_events.values())
|
||||
different_auth = event_auth_events - current_state
|
||||
new_state = yield self.state_handler.resolve_events(
|
||||
room_version,
|
||||
[list(local_view.values()), list(remote_view.values())],
|
||||
event
|
||||
)
|
||||
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key,
|
||||
)
|
||||
logger.info(
|
||||
"After state res: updating auth_events with new state %s",
|
||||
{
|
||||
(d.type, d.state_key): d.event_id for d in new_state.values()
|
||||
if auth_events.get((d.type, d.state_key)) != d
|
||||
},
|
||||
)
|
||||
|
||||
if different_auth and not event.internal_metadata.is_outlier():
|
||||
logger.info("Different auth after resolution: %s", different_auth)
|
||||
auth_events.update(new_state)
|
||||
|
||||
# Only do auth resolution if we have something new to say.
|
||||
# We can't rove an auth failure.
|
||||
do_resolution = False
|
||||
different_auth = event_auth_events.difference(
|
||||
e.event_id for e in auth_events.values()
|
||||
)
|
||||
|
||||
provable = [
|
||||
RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
|
||||
]
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key,
|
||||
)
|
||||
|
||||
for e_id in different_auth:
|
||||
if e_id in have_events:
|
||||
if have_events[e_id] in provable:
|
||||
do_resolution = True
|
||||
break
|
||||
if not different_auth:
|
||||
# we're done
|
||||
return
|
||||
|
||||
if do_resolution:
|
||||
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
||||
# 1. Get what we think is the auth chain.
|
||||
auth_ids = yield self.auth.compute_auth_events(
|
||||
event, prev_state_ids
|
||||
)
|
||||
local_auth_chain = yield self.store.get_auth_chain(
|
||||
auth_ids, include_given=True
|
||||
)
|
||||
logger.info(
|
||||
"auth_events still refers to events which are not in the calculated auth "
|
||||
"chain after state resolution: %s",
|
||||
different_auth,
|
||||
)
|
||||
|
||||
try:
|
||||
# 2. Get remote difference.
|
||||
result = yield self.federation_client.query_auth(
|
||||
origin,
|
||||
event.room_id,
|
||||
event.event_id,
|
||||
local_auth_chain,
|
||||
)
|
||||
# Only do auth resolution if we have something new to say.
|
||||
# We can't prove an auth failure.
|
||||
do_resolution = False
|
||||
|
||||
seen_remotes = yield self.store.have_seen_events(
|
||||
[e.event_id for e in result["auth_chain"]]
|
||||
)
|
||||
for e_id in different_auth:
|
||||
if e_id in have_events:
|
||||
if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
|
||||
do_resolution = True
|
||||
break
|
||||
|
||||
# 3. Process any remote auth chain events we haven't seen.
|
||||
for ev in result["auth_chain"]:
|
||||
if ev.event_id in seen_remotes:
|
||||
continue
|
||||
if not do_resolution:
|
||||
logger.info(
|
||||
"Skipping auth resolution due to lack of provable rejection reasons"
|
||||
)
|
||||
return
|
||||
|
||||
if ev.event_id == event.event_id:
|
||||
continue
|
||||
logger.info("Doing auth resolution")
|
||||
|
||||
try:
|
||||
auth_ids = ev.auth_event_ids()
|
||||
auth = {
|
||||
(e.type, e.state_key): e
|
||||
for e in result["auth_chain"]
|
||||
if e.event_id in auth_ids
|
||||
or event.type == EventTypes.Create
|
||||
}
|
||||
ev.internal_metadata.outlier = True
|
||||
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
||||
|
||||
logger.debug(
|
||||
"do_auth %s different_auth: %s",
|
||||
event.event_id, e.event_id
|
||||
)
|
||||
|
||||
yield self._handle_new_event(
|
||||
origin, ev, auth_events=auth
|
||||
)
|
||||
|
||||
if ev.event_id in event_auth_events:
|
||||
auth_events[(ev.type, ev.state_key)] = ev
|
||||
except AuthError:
|
||||
pass
|
||||
|
||||
except Exception:
|
||||
# FIXME:
|
||||
logger.exception("Failed to query auth chain")
|
||||
|
||||
# 4. Look at rejects and their proofs.
|
||||
# TODO.
|
||||
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key,
|
||||
)
|
||||
# 1. Get what we think is the auth chain.
|
||||
auth_ids = yield self.auth.compute_auth_events(
|
||||
event, prev_state_ids
|
||||
)
|
||||
local_auth_chain = yield self.store.get_auth_chain(
|
||||
auth_ids, include_given=True
|
||||
)
|
||||
|
||||
try:
|
||||
self.auth.check(room_version, event, auth_events=auth_events)
|
||||
except AuthError as e:
|
||||
logger.warn("Failed auth resolution for %r because %s", event, e)
|
||||
raise e
|
||||
# 2. Get remote difference.
|
||||
try:
|
||||
result = yield self.federation_client.query_auth(
|
||||
origin,
|
||||
event.room_id,
|
||||
event.event_id,
|
||||
local_auth_chain,
|
||||
)
|
||||
except RequestSendFailed as e:
|
||||
# The other side isn't around or doesn't implement the
|
||||
# endpoint, so lets just bail out.
|
||||
logger.info("Failed to query auth from remote: %s", e)
|
||||
return
|
||||
|
||||
seen_remotes = yield self.store.have_seen_events(
|
||||
[e.event_id for e in result["auth_chain"]]
|
||||
)
|
||||
|
||||
# 3. Process any remote auth chain events we haven't seen.
|
||||
for ev in result["auth_chain"]:
|
||||
if ev.event_id in seen_remotes:
|
||||
continue
|
||||
|
||||
if ev.event_id == event.event_id:
|
||||
continue
|
||||
|
||||
try:
|
||||
auth_ids = ev.auth_event_ids()
|
||||
auth = {
|
||||
(e.type, e.state_key): e
|
||||
for e in result["auth_chain"]
|
||||
if e.event_id in auth_ids
|
||||
or event.type == EventTypes.Create
|
||||
}
|
||||
ev.internal_metadata.outlier = True
|
||||
|
||||
logger.debug(
|
||||
"do_auth %s different_auth: %s",
|
||||
event.event_id, e.event_id
|
||||
)
|
||||
|
||||
yield self._handle_new_event(
|
||||
origin, ev, auth_events=auth
|
||||
)
|
||||
|
||||
if ev.event_id in event_auth_events:
|
||||
auth_events[(ev.type, ev.state_key)] = ev
|
||||
except AuthError:
|
||||
pass
|
||||
|
||||
except Exception:
|
||||
# FIXME:
|
||||
logger.exception("Failed to query auth chain")
|
||||
|
||||
# 4. Look at rejects and their proofs.
|
||||
# TODO.
|
||||
|
||||
yield self._update_context_for_auth_events(
|
||||
event, context, auth_events, event_key,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _update_context_for_auth_events(self, event, context, auth_events,
|
||||
@@ -2455,11 +2594,23 @@ class FederationHandler(BaseHandler):
|
||||
builder=builder
|
||||
)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.info(
|
||||
"Creation of threepid invite %s forbidden by third-party rules",
|
||||
event,
|
||||
)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
event, context = yield self.add_display_name_to_third_party_invite(
|
||||
room_version, event_dict, event, context
|
||||
)
|
||||
|
||||
EventValidator().validate_new(event)
|
||||
EventValidator().validate_new(event, self.config)
|
||||
|
||||
# We need to tell the transaction queue to send this out, even
|
||||
# though the sender isn't a local user.
|
||||
@@ -2503,6 +2654,18 @@ class FederationHandler(BaseHandler):
|
||||
builder=builder,
|
||||
)
|
||||
|
||||
event_allowed = yield self.third_party_event_rules.check_event_allowed(
|
||||
event, context,
|
||||
)
|
||||
if not event_allowed:
|
||||
logger.warning(
|
||||
"Exchange of threepid invite %s forbidden by third-party rules",
|
||||
event,
|
||||
)
|
||||
raise SynapseError(
|
||||
403, "This event is not allowed in this context", Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
event, context = yield self.add_display_name_to_third_party_invite(
|
||||
room_version, event_dict, event, context
|
||||
)
|
||||
@@ -2518,12 +2681,6 @@ class FederationHandler(BaseHandler):
|
||||
# though the sender isn't a local user.
|
||||
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
|
||||
|
||||
# XXX we send the invite here, but send_membership_event also sends it,
|
||||
# so we end up making two requests. I think this is redundant.
|
||||
returned_invite = yield self.send_invite(origin, event)
|
||||
# TODO: Make sure the signatures actually are correct.
|
||||
event.signatures.update(returned_invite.signatures)
|
||||
|
||||
member_handler = self.hs.get_room_member_handler()
|
||||
yield member_handler.send_membership_event(None, event, context)
|
||||
|
||||
@@ -2558,7 +2715,7 @@ class FederationHandler(BaseHandler):
|
||||
event, context = yield self.event_creation_handler.create_new_client_event(
|
||||
builder=builder,
|
||||
)
|
||||
EventValidator().validate_new(event)
|
||||
EventValidator().validate_new(event, self.config)
|
||||
defer.returnValue((event, context))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2017 Vector Creations Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2018, 2019 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,13 +20,18 @@
|
||||
import logging
|
||||
|
||||
from canonicaljson import json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import verify_signed_json
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
CodeMessageException,
|
||||
Codes,
|
||||
HttpResponseException,
|
||||
ProxiedRequestError,
|
||||
SynapseError,
|
||||
)
|
||||
|
||||
@@ -47,6 +52,8 @@ class IdentityHandler(BaseHandler):
|
||||
self.trust_any_id_server_just_for_testing_do_not_use = (
|
||||
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
|
||||
)
|
||||
self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls
|
||||
self._enable_lookup = hs.config.enable_3pid_lookup
|
||||
|
||||
def _should_trust_id_server(self, id_server):
|
||||
if id_server not in self.trusted_id_servers:
|
||||
@@ -84,7 +91,10 @@ class IdentityHandler(BaseHandler):
|
||||
'credentials', id_server
|
||||
)
|
||||
defer.returnValue(None)
|
||||
|
||||
# if we have a rewrite rule set for the identity server,
|
||||
# apply it now.
|
||||
if id_server in self.rewrite_identity_server_urls:
|
||||
id_server = self.rewrite_identity_server_urls[id_server]
|
||||
try:
|
||||
data = yield self.http_client.get_json(
|
||||
"https://%s%s" % (
|
||||
@@ -120,10 +130,18 @@ class IdentityHandler(BaseHandler):
|
||||
else:
|
||||
raise SynapseError(400, "No client_secret in creds")
|
||||
|
||||
# if we have a rewrite rule set for the identity server,
|
||||
# apply it now, but only for sending the request (not
|
||||
# storing in the database).
|
||||
if id_server in self.rewrite_identity_server_urls:
|
||||
id_server_host = self.rewrite_identity_server_urls[id_server]
|
||||
else:
|
||||
id_server_host = id_server
|
||||
|
||||
try:
|
||||
data = yield self.http_client.post_urlencoded_get_json(
|
||||
"https://%s%s" % (
|
||||
id_server, "/_matrix/identity/api/v1/3pid/bind"
|
||||
id_server_host, "/_matrix/identity/api/v1/3pid/bind"
|
||||
),
|
||||
{
|
||||
'sid': creds['sid'],
|
||||
@@ -221,6 +239,16 @@ class IdentityHandler(BaseHandler):
|
||||
b"Authorization": auth_headers,
|
||||
}
|
||||
|
||||
# if we have a rewrite rule set for the identity server,
|
||||
# apply it now.
|
||||
#
|
||||
# Note that destination_is has to be the real id_server, not
|
||||
# the server we connect to.
|
||||
if id_server in self.rewrite_identity_server_urls:
|
||||
id_server = self.rewrite_identity_server_urls[id_server]
|
||||
|
||||
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
|
||||
|
||||
try:
|
||||
yield self.http_client.post_json_get_json(
|
||||
url,
|
||||
@@ -247,7 +275,14 @@ class IdentityHandler(BaseHandler):
|
||||
defer.returnValue(changed)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
|
||||
def requestEmailToken(
|
||||
self,
|
||||
id_server,
|
||||
email,
|
||||
client_secret,
|
||||
send_attempt,
|
||||
next_link=None,
|
||||
):
|
||||
if not self._should_trust_id_server(id_server):
|
||||
raise SynapseError(
|
||||
400, "Untrusted ID server '%s'" % id_server,
|
||||
@@ -259,7 +294,14 @@ class IdentityHandler(BaseHandler):
|
||||
'client_secret': client_secret,
|
||||
'send_attempt': send_attempt,
|
||||
}
|
||||
params.update(kwargs)
|
||||
|
||||
# if we have a rewrite rule set for the identity server,
|
||||
# apply it now.
|
||||
if id_server in self.rewrite_identity_server_urls:
|
||||
id_server = self.rewrite_identity_server_urls[id_server]
|
||||
|
||||
if next_link:
|
||||
params.update({'next_link': next_link})
|
||||
|
||||
try:
|
||||
data = yield self.http_client.post_json_get_json(
|
||||
@@ -292,7 +334,10 @@ class IdentityHandler(BaseHandler):
|
||||
'send_attempt': send_attempt,
|
||||
}
|
||||
params.update(kwargs)
|
||||
|
||||
# if we have a rewrite rule set for the identity server,
|
||||
# apply it now.
|
||||
if id_server in self.rewrite_identity_server_urls:
|
||||
id_server = self.rewrite_identity_server_urls[id_server]
|
||||
try:
|
||||
data = yield self.http_client.post_json_get_json(
|
||||
"https://%s%s" % (
|
||||
@@ -305,3 +350,125 @@ class IdentityHandler(BaseHandler):
|
||||
except HttpResponseException as e:
|
||||
logger.info("Proxied requestToken failed: %r", e)
|
||||
raise e.to_synapse_error()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def lookup_3pid(self, id_server, medium, address):
|
||||
"""Looks up a 3pid in the passed identity server.
|
||||
|
||||
Args:
|
||||
id_server (str): The server name (including port, if required)
|
||||
of the identity server to use.
|
||||
medium (str): The type of the third party identifier (e.g. "email").
|
||||
address (str): The third party identifier (e.g. "foo@example.com").
|
||||
|
||||
Returns:
|
||||
Deferred[dict]: The result of the lookup. See
|
||||
https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
|
||||
for details
|
||||
"""
|
||||
if not self._should_trust_id_server(id_server):
|
||||
raise SynapseError(
|
||||
400, "Untrusted ID server '%s'" % id_server,
|
||||
Codes.SERVER_NOT_TRUSTED
|
||||
)
|
||||
|
||||
if not self._enable_lookup:
|
||||
raise AuthError(
|
||||
403, "Looking up third-party identifiers is denied from this server",
|
||||
)
|
||||
|
||||
target = self.rewrite_identity_server_urls.get(id_server, id_server)
|
||||
|
||||
try:
|
||||
data = yield self.http_client.get_json(
|
||||
"https://%s/_matrix/identity/api/v1/lookup" % (target,),
|
||||
{
|
||||
"medium": medium,
|
||||
"address": address,
|
||||
}
|
||||
)
|
||||
|
||||
if "mxid" in data:
|
||||
if "signatures" not in data:
|
||||
raise AuthError(401, "No signatures on 3pid binding")
|
||||
yield self._verify_any_signature(data, id_server)
|
||||
|
||||
except HttpResponseException as e:
|
||||
logger.info("Proxied lookup failed: %r", e)
|
||||
raise e.to_synapse_error()
|
||||
except IOError as e:
|
||||
logger.info("Failed to contact %r: %s", id_server, e)
|
||||
raise ProxiedRequestError(503, "Failed to contact identity server")
|
||||
|
||||
defer.returnValue(data)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def bulk_lookup_3pid(self, id_server, threepids):
|
||||
"""Looks up given 3pids in the passed identity server.
|
||||
|
||||
Args:
|
||||
id_server (str): The server name (including port, if required)
|
||||
of the identity server to use.
|
||||
threepids ([[str, str]]): The third party identifiers to lookup, as
|
||||
a list of 2-string sized lists ([medium, address]).
|
||||
|
||||
Returns:
|
||||
Deferred[dict]: The result of the lookup. See
|
||||
https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
|
||||
for details
|
||||
"""
|
||||
if not self._should_trust_id_server(id_server):
|
||||
raise SynapseError(
|
||||
400, "Untrusted ID server '%s'" % id_server,
|
||||
Codes.SERVER_NOT_TRUSTED
|
||||
)
|
||||
|
||||
if not self._enable_lookup:
|
||||
raise AuthError(
|
||||
403, "Looking up third-party identifiers is denied from this server",
|
||||
)
|
||||
|
||||
target = self.rewrite_identity_server_urls.get(id_server, id_server)
|
||||
|
||||
try:
|
||||
data = yield self.http_client.post_json_get_json(
|
||||
"https://%s/_matrix/identity/api/v1/bulk_lookup" % (target,),
|
||||
{
|
||||
"threepids": threepids,
|
||||
}
|
||||
)
|
||||
|
||||
except HttpResponseException as e:
|
||||
logger.info("Proxied lookup failed: %r", e)
|
||||
raise e.to_synapse_error()
|
||||
except IOError as e:
|
||||
logger.info("Failed to contact %r: %s", id_server, e)
|
||||
raise ProxiedRequestError(503, "Failed to contact identity server")
|
||||
|
||||
defer.returnValue(data)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _verify_any_signature(self, data, server_hostname):
|
||||
if server_hostname not in data["signatures"]:
|
||||
raise AuthError(401, "No signature from server %s" % (server_hostname,))
|
||||
|
||||
for key_name, signature in data["signatures"][server_hostname].items():
|
||||
target = self.rewrite_identity_server_urls.get(
|
||||
server_hostname, server_hostname,
|
||||
)
|
||||
|
||||
key_data = yield self.http_client.get_json(
|
||||
"https://%s/_matrix/identity/api/v1/pubkey/%s" %
|
||||
(target, key_name,),
|
||||
)
|
||||
if "public_key" not in key_data:
|
||||
raise AuthError(401, "No public key named %s from %s" %
|
||||
(key_name, server_hostname,))
|
||||
verify_signed_json(
|
||||
data,
|
||||
server_hostname,
|
||||
decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
|
||||
)
|
||||
return
|
||||
|
||||
raise AuthError(401, "No signature from server %s" % (server_hostname,))
|
||||
|
||||
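In the lookup path above, _verify_any_signature accepts the response as soon as any one signature made under the identity server's hostname verifies. A minimal standalone sketch of that check, assuming a `data` dict as returned by /lookup and a base64 public key already fetched from the server's /pubkey endpoint (the helper name is illustrative, not part of this branch):

from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64


def check_3pid_binding_signature(data, server_name, key_name, public_key_base64):
    # Rebuild the verify key from the server's published base64 key, then
    # check the signed JSON; raises SignatureVerifyException on a bad signature.
    verify_key = decode_verify_key_bytes(key_name, decode_base64(public_key_base64))
    verify_signed_json(data, server_name, verify_key)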
synapse/handlers/message.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 - 2016 OpenMarket Ltd
-# Copyright 2017 - 2018 New Vector Ltd
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -166,6 +167,9 @@ class MessageHandler(object):
         now = self.clock.time_msec()
         events = yield self._event_serializer.serialize_events(
             room_state.values(), now,
+            # We don't bother bundling aggregations in when asked for state
+            # events, as clients won't use them.
+            bundle_aggregations=False,
         )
         defer.returnValue(events)
@@ -245,6 +249,7 @@ class EventCreationHandler(object):
         self.action_generator = hs.get_action_generator()

         self.spam_checker = hs.get_spam_checker()
+        self.third_party_event_rules = hs.get_third_party_event_rules()

         self._block_events_without_consent_error = (
             self.config.block_events_without_consent_error
@@ -367,7 +372,7 @@ class EventCreationHandler(object):
                     "You must be in the room to create an alias for it",
                 )

-        self.validator.validate_new(event)
+        self.validator.validate_new(event, self.config)

         defer.returnValue((event, context))
@@ -599,7 +604,7 @@ class EventCreationHandler(object):
         if requester:
             context.app_service = requester.app_service

-        self.validator.validate_new(event)
+        self.validator.validate_new(event, self.config)

         # If this event is an annotation then we check that the sender
         # can't annotate the same way twice (e.g. stops users from liking an
@@ -655,6 +660,14 @@ class EventCreationHandler(object):
         else:
             room_version = yield self.store.get_room_version(event.room_id)

+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
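The third_party_event_rules calls in this branch (check_event_allowed here, on_create_room in the room-creation handler, and check_threepid_can_be_invited in the 3pid-invite path further down) imply a pluggable rules module shaped roughly like the sketch below. The class name and the permissive return values are assumptions inferred from the call sites, not code from the diff:

from twisted.internet import defer


class ExampleThirdPartyEventRules(object):
    """Hypothetical rules module satisfying the calls made in this branch."""

    def check_event_allowed(self, event, context):
        # Returning False makes EventCreationHandler raise 403 FORBIDDEN.
        return defer.succeed(True)

    def on_create_room(self, requester, config, is_requester_admin):
        # May veto or adjust the room-creation request before the room is made.
        return defer.succeed(None)

    def check_threepid_can_be_invited(self, medium, address, room_id):
        # Returning False blocks the third-party invite with a 403.
        return defer.succeed(True)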
synapse/handlers/pagination.py
@@ -15,11 +15,14 @@
 # limitations under the License.
 import logging

+from six import iteritems
+
 from twisted.internet import defer
 from twisted.python.failure import Failure

 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
@@ -79,6 +82,114 @@ class PaginationHandler(object):
         self._purges_by_id = {}
         self._event_serializer = hs.get_event_client_serializer()

+        self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
+
+        if hs.config.retention_enabled:
+            # Run the purge jobs described in the configuration file.
+            for job in hs.config.retention_purge_jobs:
+                self.clock.looping_call(
+                    run_as_background_process,
+                    job["interval"],
+                    "purge_history_for_rooms_in_range",
+                    self.purge_history_for_rooms_in_range,
+                    job["shortest_max_lifetime"],
+                    job["longest_max_lifetime"],
+                )
+
+    @defer.inlineCallbacks
+    def purge_history_for_rooms_in_range(self, min_ms, max_ms):
+        """Purge outdated events from rooms within the given retention range.
+
+        If a default retention policy is defined in the server's configuration and its
+        'max_lifetime' is within this range, also targets rooms which don't have a
+        retention policy.
+
+        Args:
+            min_ms (int|None): Duration in milliseconds that defines the lower limit of
+                the range to handle (exclusive). If None, it means that the range has no
+                lower limit.
+            max_ms (int|None): Duration in milliseconds that defines the upper limit of
+                the range to handle (inclusive). If None, it means that the range has no
+                upper limit.
+        """
+        # We want the storage layer to include rooms with no retention policy in its
+        # return value only if a default retention policy is defined in the server's
+        # configuration and that policy's 'max_lifetime' is either lower (or equal) than
+        # max_ms or higher than min_ms (or both).
+        if self._retention_default_max_lifetime is not None:
+            include_null = True
+
+            if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
+                # The default max_lifetime is lower than (or equal to) min_ms.
+                include_null = False
+
+            if max_ms is not None and max_ms < self._retention_default_max_lifetime:
+                # The default max_lifetime is higher than max_ms.
+                include_null = False
+        else:
+            include_null = False
+
+        rooms = yield self.store.get_rooms_for_retention_period_in_range(
+            min_ms, max_ms, include_null
+        )
+
+        for room_id, retention_policy in iteritems(rooms):
+            if room_id in self._purges_in_progress_by_room:
+                logger.warning(
+                    "[purge] not purging room %s as there's an ongoing purge running"
+                    " for this room",
+                    room_id,
+                )
+                continue
+
+            max_lifetime = retention_policy["max_lifetime"]
+
+            if max_lifetime is None:
+                # If max_lifetime is None, it means that include_null equals True,
+                # therefore we can safely assume that there is a default policy defined
+                # in the server's configuration.
+                max_lifetime = self._retention_default_max_lifetime
+
+            # Figure out what token we should start purging at.
+            ts = self.clock.time_msec() - max_lifetime
+
+            stream_ordering = (
+                yield self.store.find_first_stream_ordering_after_ts(ts)
+            )
+
+            r = (
+                yield self.store.get_room_event_after_stream_ordering(
+                    room_id, stream_ordering,
+                )
+            )
+            if not r:
+                logger.warning(
+                    "[purge] purging events not possible: No event found "
+                    "(ts %i => stream_ordering %i)",
+                    ts, stream_ordering,
+                )
+                continue
+
+            (stream, topo, _event_id) = r
+            token = "t%d-%d" % (topo, stream)
+
+            purge_id = random_string(16)
+
+            self._purges_by_id[purge_id] = PurgeStatus()
+
+            logger.info(
+                "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id)
+            )
+
+            # We want to purge everything, including local events, and to run the purge
+            # in the background so that it's not blocking any other operation apart from
+            # other purges in the same room.
+            run_as_background_process(
+                "_purge_history",
+                self._purge_history,
+                purge_id, room_id, token, True,
+            )
+
     def start_purge_history(self, room_id, token,
                             delete_local_events=False):
         """Start off a history purge on a room.
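The include_null decision above restated as a small pure function, which may help when checking which ranges sweep rooms that have no retention policy (a sketch, not code from the branch):

def includes_rooms_without_policy(default_max_lifetime, min_ms, max_ms):
    # Rooms without a retention policy are only swept when the server-wide
    # default max_lifetime falls inside the (min_ms, max_ms] range.
    if default_max_lifetime is None:
        return False
    if min_ms is not None and min_ms >= default_max_lifetime:
        return False
    if max_ms is not None and max_ms < default_max_lifetime:
        return False
    return True


ONE_DAY_MS = 24 * 60 * 60 * 1000
assert includes_rooms_without_policy(ONE_DAY_MS, None, None)
assert not includes_rooms_without_policy(ONE_DAY_MS, ONE_DAY_MS, None)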
synapse/handlers/password_policy.py (new file, 93 lines)
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+
+from synapse.api.errors import Codes, PasswordRefusedError
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyHandler(object):
+    def __init__(self, hs):
+        self.policy = hs.config.password_policy
+        self.enabled = hs.config.password_policy_enabled
+
+        # Regexps for the spec'd policy parameters.
+        self.regexp_digit = re.compile("[0-9]")
+        self.regexp_symbol = re.compile("[^a-zA-Z0-9]")
+        self.regexp_uppercase = re.compile("[A-Z]")
+        self.regexp_lowercase = re.compile("[a-z]")
+
+    def validate_password(self, password):
+        """Checks whether a given password complies with the server's policy.
+
+        Args:
+            password (str): The password to check against the server's policy.
+
+        Raises:
+            PasswordRefusedError: The password doesn't comply with the server's policy.
+        """
+
+        if not self.enabled:
+            return
+
+        minimum_accepted_length = self.policy.get("minimum_length", 0)
+        if len(password) < minimum_accepted_length:
+            raise PasswordRefusedError(
+                msg=(
+                    "The password must be at least %d characters long"
+                    % minimum_accepted_length
+                ),
+                errcode=Codes.PASSWORD_TOO_SHORT,
+            )
+
+        if (
+            self.policy.get("require_digit", False) and
+            self.regexp_digit.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one digit",
+                errcode=Codes.PASSWORD_NO_DIGIT,
+            )
+
+        if (
+            self.policy.get("require_symbol", False) and
+            self.regexp_symbol.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one symbol",
+                errcode=Codes.PASSWORD_NO_SYMBOL,
+            )
+
+        if (
+            self.policy.get("require_uppercase", False) and
+            self.regexp_uppercase.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one uppercase letter",
+                errcode=Codes.PASSWORD_NO_UPPERCASE,
+            )
+
+        if (
+            self.policy.get("require_lowercase", False) and
+            self.regexp_lowercase.search(password) is None
+        ):
+            raise PasswordRefusedError(
+                msg="The password must include at least one lowercase letter",
+                errcode=Codes.PASSWORD_NO_LOWERCASE,
+            )
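A usage sketch for the new handler; the fake homeserver below is a stand-in carrying only the two config attributes the constructor reads, and the printed error code assumes Codes.PASSWORD_TOO_SHORT maps to M_PASSWORD_TOO_SHORT:

from types import SimpleNamespace

from synapse.api.errors import PasswordRefusedError
from synapse.handlers.password_policy import PasswordPolicyHandler

fake_hs = SimpleNamespace(config=SimpleNamespace(
    password_policy={"minimum_length": 10, "require_digit": True},
    password_policy_enabled=True,
))
handler = PasswordPolicyHandler(fake_hs)

handler.validate_password("correct horse 1")  # long enough and has a digit: passes
try:
    handler.validate_password("hunter2")
except PasswordRefusedError as e:
    print(e.errcode)  # e.g. M_PASSWORD_TOO_SHORT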
synapse/handlers/presence.py
@@ -158,7 +158,13 @@ class PresenceHandler(object):
         # have not yet been persisted
         self.unpersisted_users_changes = set()

-        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+        hs.get_reactor().addSystemEventTrigger(
+            "before",
+            "shutdown",
+            run_as_background_process,
+            "presence.on_shutdown",
+            self._on_shutdown,
+        )

         self.serial_to_user = {}
         self._next_serial = 1
@@ -182,17 +188,27 @@ class PresenceHandler(object):
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
+        def run_timeout_handler():
+            return run_as_background_process(
+                "handle_presence_timeouts", self._handle_timeouts
+            )
+
         self.clock.call_later(
             30,
             self.clock.looping_call,
-            self._handle_timeouts,
+            run_timeout_handler,
             5000,
         )

+        def run_persister():
+            return run_as_background_process(
+                "persist_presence_changes", self._persist_unpersisted_changes
+            )
+
         self.clock.call_later(
             60,
             self.clock.looping_call,
-            self._persist_unpersisted_changes,
+            run_persister,
             60 * 1000,
         )
@@ -229,6 +245,7 @@ class PresenceHandler(object):
         )

+        if self.unpersisted_users_changes:

             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in self.unpersisted_users_changes
@@ -240,30 +257,18 @@ class PresenceHandler(object):
         """We periodically persist the unpersisted changes, as otherwise they
         may stack up and slow down shutdown times.
         """
-        logger.info(
-            "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
-            len(self.unpersisted_users_changes)
-        )
-
         unpersisted = self.unpersisted_users_changes
         self.unpersisted_users_changes = set()

+        if unpersisted:
+            logger.info(
+                "Persisting %d unpersisted presence updates", len(unpersisted)
+            )
             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in unpersisted
             ])

-        logger.info("Finished _persist_unpersisted_changes")

-    @defer.inlineCallbacks
-    def _update_states_and_catch_exception(self, new_states):
-        try:
-            res = yield self._update_states(new_states)
-            defer.returnValue(res)
-        except Exception:
-            logger.exception("Error updating presence")
-
     @defer.inlineCallbacks
     def _update_states(self, new_states):
         """Updates presence of users. Sets the appropriate timeouts. Pokes
@@ -338,45 +343,41 @@ class PresenceHandler(object):
         logger.info("Handling presence timeouts")
         now = self.clock.time_msec()

-        try:
-            with Measure(self.clock, "presence_handle_timeouts"):
-                # Fetch the list of users that *may* have timed out. Things may have
-                # changed since the timeout was set, so we won't necessarily have to
-                # take any action.
-                users_to_check = set(self.wheel_timer.fetch(now))
+        # Fetch the list of users that *may* have timed out. Things may have
+        # changed since the timeout was set, so we won't necessarily have to
+        # take any action.
+        users_to_check = set(self.wheel_timer.fetch(now))

-                # Check whether the lists of syncing processes from an external
-                # process have expired.
-                expired_process_ids = [
-                    process_id for process_id, last_update
-                    in self.external_process_last_updated_ms.items()
-                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
-                ]
-                for process_id in expired_process_ids:
-                    users_to_check.update(
-                        self.external_process_last_updated_ms.pop(process_id, ())
-                    )
-                    self.external_process_last_update.pop(process_id)
+        # Check whether the lists of syncing processes from an external
+        # process have expired.
+        expired_process_ids = [
+            process_id for process_id, last_update
+            in self.external_process_last_updated_ms.items()
+            if now - last_update > EXTERNAL_PROCESS_EXPIRY
+        ]
+        for process_id in expired_process_ids:
+            users_to_check.update(
+                self.external_process_last_updated_ms.pop(process_id, ())
+            )
+            self.external_process_last_update.pop(process_id)

-                states = [
-                    self.user_to_current_state.get(
-                        user_id, UserPresenceState.default(user_id)
-                    )
-                    for user_id in users_to_check
-                ]
+        states = [
+            self.user_to_current_state.get(
+                user_id, UserPresenceState.default(user_id)
+            )
+            for user_id in users_to_check
+        ]

-                timers_fired_counter.inc(len(states))
+        timers_fired_counter.inc(len(states))

-                changes = handle_timeouts(
-                    states,
-                    is_mine_fn=self.is_mine_id,
-                    syncing_user_ids=self.get_currently_syncing_users(),
-                    now=now,
-                )
+        changes = handle_timeouts(
+            states,
+            is_mine_fn=self.is_mine_id,
+            syncing_user_ids=self.get_currently_syncing_users(),
+            now=now,
+        )

-            run_in_background(self._update_states_and_catch_exception, changes)
-        except Exception:
-            logger.exception("Exception in _handle_timeouts loop")
+        return self._update_states(changes)

     @defer.inlineCallbacks
     def bump_presence_active_time(self, user):
@@ -833,14 +834,17 @@ class PresenceHandler(object):
                 # joins.
                 continue

-            event = yield self.store.get_event(event_id)
-            if event.content.get("membership") != Membership.JOIN:
+            event = yield self.store.get_event(event_id, allow_none=True)
+            if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue

            if prev_event_id:
-                prev_event = yield self.store.get_event(prev_event_id)
-                if prev_event.content.get("membership") == Membership.JOIN:
+                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+                if (
+                    prev_event
+                    and prev_event.content.get("membership") == Membership.JOIN
+                ):
                     # Ignore changes to join events.
                     continue
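The recurring pattern in this file, wrapping each periodic task in run_as_background_process so every run gets its own logcontext and metrics, generalises; a hedged sketch of wiring any handler method the same way (the helper and its arguments are placeholders, not part of the branch):

from synapse.metrics.background_process_metrics import run_as_background_process


def start_looping_background_call(clock, desc, func, every_ms, initial_delay_s):
    # Wrap `func` so each iteration runs as a named background process, then
    # start the loop after an initial grace period, as the presence handler does.
    def wrapper():
        return run_as_background_process(desc, func)

    clock.call_later(initial_delay_s, clock.looping_call, wrapper, every_ms)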
synapse/handlers/profile.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +16,11 @@

 import logging

-from twisted.internet import defer
+from six.moves import range
+
+from signedjson.sign import sign_json
+
+from twisted.internet import defer, reactor

 from synapse.api.errors import (
     AuthError,
@@ -26,11 +31,15 @@ from synapse.api.errors import (
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, get_domain_from_id
+from synapse.util.logcontext import run_in_background

 from ._base import BaseHandler

 logger = logging.getLogger(__name__)

+MAX_DISPLAYNAME_LEN = 100
+MAX_AVATAR_URL_LEN = 1000
+

 class BaseProfileHandler(BaseHandler):
     """Handles fetching and updating user profile information.
@@ -40,6 +49,8 @@ class BaseProfileHandler(BaseHandler):
     subclass MasterProfileHandler
     """

+    PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000
+
     def __init__(self, hs):
         super(BaseProfileHandler, self).__init__(hs)
@@ -50,6 +61,84 @@ class BaseProfileHandler(BaseHandler):

         self.user_directory_handler = hs.get_user_directory_handler()

+        self.http_client = hs.get_simple_http_client()
+
         if hs.config.worker_app is None:
             self.clock.looping_call(
                 self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
             )
+
+            if len(self.hs.config.replicate_user_profiles_to) > 0:
+                reactor.callWhenRunning(self._assign_profile_replication_batches)
+                reactor.callWhenRunning(self._replicate_profiles)
+                # Add a looping call to replicate_profiles: this handles retries
+                # if the replication is unsuccessful when the user updated their
+                # profile.
+                self.clock.looping_call(
+                    self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
+                )
+
+    @defer.inlineCallbacks
+    def _assign_profile_replication_batches(self):
+        """If no profile replication has been done yet, allocate replication batch
+        numbers to each profile to start the replication process.
+        """
+        logger.info("Assigning profile batch numbers...")
+        total = 0
+        while True:
+            assigned = yield self.store.assign_profile_batch()
+            total += assigned
+            if assigned == 0:
+                break
+        logger.info("Assigned %d profile batch numbers", total)
+
+    @defer.inlineCallbacks
+    def _replicate_profiles(self):
+        """If any profile data has been updated and not pushed to the replication
+        targets, replicate it.
+        """
+        host_batches = yield self.store.get_replication_hosts()
+        latest_batch = yield self.store.get_latest_profile_replication_batch_number()
+        if latest_batch is None:
+            latest_batch = -1
+        for repl_host in self.hs.config.replicate_user_profiles_to:
+            if repl_host not in host_batches:
+                host_batches[repl_host] = -1
+            try:
+                for i in range(host_batches[repl_host] + 1, latest_batch + 1):
+                    yield self._replicate_host_profile_batch(repl_host, i)
+            except Exception:
+                logger.exception(
+                    "Exception while replicating to %s: aborting for now", repl_host,
+                )
+
+    @defer.inlineCallbacks
+    def _replicate_host_profile_batch(self, host, batchnum):
+        logger.info("Replicating profile batch %d to %s", batchnum, host)
+        batch_rows = yield self.store.get_profile_batch(batchnum)
+        batch = {
+            UserID(r["user_id"], self.hs.hostname).to_string(): ({
+                "display_name": r["displayname"],
+                "avatar_url": r["avatar_url"],
+            } if r["active"] else None) for r in batch_rows
+        }
+
+        url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
+        body = {
+            "batchnum": batchnum,
+            "batch": batch,
+            "origin_server": self.hs.hostname,
+        }
+        signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
+        try:
+            yield self.http_client.post_json_get_json(url, signed_body)
+            yield self.store.update_replication_batch_for_host(host, batchnum)
+            logger.info("Successfully replicated profile batch %d to %s", batchnum, host)
+        except Exception:
+            # This will get retried when the looping call next comes around
+            logger.exception("Failed to replicate profile batch %d to %s", batchnum, host)
+            raise
+
     @defer.inlineCallbacks
     def get_profile(self, user_id):
         target_user = UserID.from_string(user_id)
@@ -159,14 +248,30 @@ class BaseProfileHandler(BaseHandler):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "User is not hosted on this Home Server")

-        if not by_admin and target_user != requester.user:
+        if not by_admin and requester and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")

+        if not by_admin and self.hs.config.disable_set_displayname:
+            profile = yield self.store.get_profileinfo(target_user.localpart)
+            if profile.display_name:
+                raise SynapseError(400, "Changing displayname is disabled on this server")
+
+        if len(new_displayname) > MAX_DISPLAYNAME_LEN:
+            raise SynapseError(
+                400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ),
+            )
+
         if new_displayname == '':
             new_displayname = None

+        if len(self.hs.config.replicate_user_profiles_to) > 0:
+            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
+            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+        else:
+            new_batchnum = None
+
         yield self.store.set_profile_displayname(
-            target_user.localpart, new_displayname
+            target_user.localpart, new_displayname, new_batchnum
         )

         if self.hs.config.user_directory_search_all_users:
@@ -175,7 +280,37 @@ class BaseProfileHandler(BaseHandler):
                 target_user.to_string(), profile
             )

-        yield self._update_join_states(requester, target_user)
+        if requester:
+            yield self._update_join_states(requester, target_user)
+
+        # start a profile replication push
+        run_in_background(self._replicate_profiles)
+
+    @defer.inlineCallbacks
+    def set_active(self, target_user, active, hide):
+        """
+        Sets the 'active' flag on a user profile. If set to false, the user
+        account is considered deactivated or hidden.
+
+        If 'hide' is true, then we interpret active=False as a request to try to
+        hide the user rather than deactivating it. This means withholding the
+        profile from replication (and marking it as inactive) rather than clearing
+        the profile from the HS DB. Note that unlike set_displayname and
+        set_avatar_url, this does *not* perform authorization checks! This is
+        because the only place it's used currently is in account deactivation
+        where we've already done these checks anyway.
+        """
+        if len(self.hs.config.replicate_user_profiles_to) > 0:
+            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
+            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+        else:
+            new_batchnum = None
+        yield self.store.set_profile_active(
+            target_user.localpart, active, hide, new_batchnum
+        )
+
+        # start a profile replication push
+        run_in_background(self._replicate_profiles)

     @defer.inlineCallbacks
     def get_avatar_url(self, target_user):
@@ -217,8 +352,24 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")

+        if not by_admin and self.hs.config.disable_set_avatar_url:
+            profile = yield self.store.get_profileinfo(target_user.localpart)
+            if profile.avatar_url:
+                raise SynapseError(400, "Changing avatar url is disabled on this server")
+
+        if len(self.hs.config.replicate_user_profiles_to) > 0:
+            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
+            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+        else:
+            new_batchnum = None
+
+        if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
+            raise SynapseError(
+                400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ),
+            )
+
         yield self.store.set_profile_avatar_url(
-            target_user.localpart, new_avatar_url
+            target_user.localpart, new_avatar_url, new_batchnum,
         )

         if self.hs.config.user_directory_search_all_users:
@@ -229,6 +380,9 @@ class BaseProfileHandler(BaseHandler):

         yield self._update_join_states(requester, target_user)

+        # start a profile replication push
+        run_in_background(self._replicate_profiles)
+
     @defer.inlineCallbacks
     def on_profile_query(self, args):
         user = UserID.from_string(args["user_id"])
@@ -308,6 +462,10 @@ class BaseProfileHandler(BaseHandler):
         if not self.hs.config.require_auth_for_profile_requests or not requester:
             return

+        # Always allow the user to query their own profile.
+        if target_user.to_string() == requester.to_string():
+            return
+
         try:
             requester_rooms = yield self.store.get_rooms_for_user(
                 requester.to_string()
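The batch bookkeeping the replication loop relies on, restated (simplified sketch; the helper name is illustrative): every profile write is stamped with a monotonically increasing batch number, and each target host is pushed every batch it has not yet acknowledged.

def batches_to_push(host_acked_batch, latest_batch):
    # host_acked_batch is -1 for a host that has never been pushed to; this
    # mirrors `range(host_batches[repl_host] + 1, latest_batch + 1)` above.
    return range(host_acked_batch + 1, latest_batch + 1)


assert list(batches_to_push(-1, 2)) == [0, 1, 2]  # brand-new replication target
assert list(batches_to_push(2, 2)) == []          # fully caught up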
synapse/handlers/register.py
@@ -61,6 +61,7 @@ class RegistrationHandler(BaseHandler):
         self.profile_handler = hs.get_profile_handler()
         self.user_directory_handler = hs.get_user_directory_handler()
         self.captcha_client = CaptchaServerHttpClient(hs)
+        self.http_client = hs.get_simple_http_client()
         self.identity_handler = self.hs.get_handlers().identity_handler
         self.ratelimiter = hs.get_registration_ratelimiter()
@@ -73,6 +74,8 @@ class RegistrationHandler(BaseHandler):
         )
         self._server_notices_mxid = hs.config.server_notices_mxid

+        self._show_in_user_directory = self.hs.config.show_users_in_user_directory
+
         if hs.config.worker_app:
             self._register_client = ReplicationRegisterServlet.make_client(hs)
             self._register_device_client = (
@@ -234,6 +237,11 @@ class RegistrationHandler(BaseHandler):
                 address=address,
             )

+            if default_display_name:
+                yield self.profile_handler.set_displayname(
+                    user, None, default_display_name, by_admin=True,
+                )
+
             if self.hs.config.user_directory_search_all_users:
                 profile = yield self.store.get_profileinfo(localpart)
                 yield self.user_directory_handler.handle_local_profile_change(
@@ -263,6 +271,11 @@ class RegistrationHandler(BaseHandler):
                     create_profile_with_displayname=default_display_name,
                     address=address,
                 )
+
+                yield self.profile_handler.set_displayname(
+                    user, None, default_display_name, by_admin=True,
+                )
+
             except SynapseError:
                 # if user id is taken, just generate another
                 user = None
@@ -287,6 +300,14 @@ class RegistrationHandler(BaseHandler):
                 user_id, threepid_dict, None, False,
             )

+        # Prevent the new user from showing up in the user directory if the server
+        # mandates it.
+        if not self._show_in_user_directory:
+            yield self.store.add_account_data_for_user(
+                user_id, "im.vector.hide_profile", {'hide_profile': True},
+            )
+            yield self.profile_handler.set_active(user, False, True)
+
         defer.returnValue((user_id, token))

     @defer.inlineCallbacks
@@ -356,7 +377,9 @@ class RegistrationHandler(BaseHandler):
         yield self._auto_join_rooms(user_id)

     @defer.inlineCallbacks
-    def appservice_register(self, user_localpart, as_token):
+    def appservice_register(self, user_localpart, as_token, password, display_name):
         # FIXME: this should be factored out and merged with normal register()

         user = UserID(user_localpart, self.hs.hostname)
         user_id = user.to_string()
         service = self.store.get_app_service_by_token(as_token)
@@ -374,12 +397,29 @@ class RegistrationHandler(BaseHandler):
             user_id, allowed_appservice=service
         )

+        password_hash = ""
+        if password:
+            password_hash = yield self.auth_handler().hash(password)
+
+        display_name = display_name or user.localpart
+
         yield self.register_with_store(
             user_id=user_id,
-            password_hash="",
+            password_hash=password_hash,
             appservice_id=service_id,
-            create_profile_with_displayname=user.localpart,
+            create_profile_with_displayname=display_name,
         )
+
+        yield self.profile_handler.set_displayname(
+            user, None, display_name, by_admin=True,
+        )
+
+        if self.hs.config.user_directory_search_all_users:
+            profile = yield self.store.get_profileinfo(user_localpart)
+            yield self.user_directory_handler.handle_local_profile_change(
+                user_id, profile
+            )

         defer.returnValue(user_id)

     @defer.inlineCallbacks
@@ -405,6 +445,39 @@ class RegistrationHandler(BaseHandler):
         else:
             logger.info("Valid captcha entered from %s", ip)

+    @defer.inlineCallbacks
+    def register_saml2(self, localpart):
+        """
+        Registers email_id as SAML2 Based Auth.
+        """
+        if types.contains_invalid_mxid_characters(localpart):
+            raise SynapseError(
+                400,
+                "User ID can only contain characters a-z, 0-9, or '=_-./'",
+            )
+        yield self.auth.check_auth_blocking()
+        user = UserID(localpart, self.hs.hostname)
+        user_id = user.to_string()
+
+        yield self.check_user_id_not_appservice_exclusive(user_id)
+        token = self.macaroon_gen.generate_access_token(user_id)
+        try:
+            yield self.register_with_store(
+                user_id=user_id,
+                token=token,
+                password_hash=None,
+                create_profile_with_displayname=user.localpart,
+            )
+
+            yield self.profile_handler.set_displayname(
+                user, None, user.localpart, by_admin=True,
+            )
+        except Exception as e:
+            yield self.store.add_access_token_to_user(user_id, token)
+            # Ignore Registration errors
+            logger.exception(e)
+        defer.returnValue((user_id, token))
+
     @defer.inlineCallbacks
     def register_email(self, threepidCreds):
         """
@@ -427,7 +500,9 @@ class RegistrationHandler(BaseHandler):
         logger.info("got threepid with medium '%s' and address '%s'",
                     threepid['medium'], threepid['address'])

-        if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']):
+        if not (
+            yield check_3pid_allowed(self.hs, threepid['medium'], threepid['address'])
+        ):
             raise RegistrationError(
                 403, "Third party identifier is not allowed"
             )
@@ -468,6 +543,39 @@ class RegistrationHandler(BaseHandler):
                 errcode=Codes.EXCLUSIVE
             )

+    @defer.inlineCallbacks
+    def shadow_register(self, localpart, display_name, auth_result, params):
+        """Invokes the current registration on another server, using
+        shared secret registration, passing in any auth_results from
+        other registration UI auth flows (e.g. validated 3pids)
+        Useful for setting up shadow/backup accounts on a parallel deployment.
+        """
+
+        # TODO: retries
+        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+        as_token = self.hs.config.shadow_server.get("as_token")
+
+        yield self.http_client.post_json_get_json(
+            "%s/_matrix/client/r0/register?access_token=%s" % (
+                shadow_hs_url, as_token,
+            ),
+            {
+                # XXX: auth_result is an unspecified extension for shadow registration
+                'auth_result': auth_result,
+                # XXX: another unspecified extension for shadow registration to ensure
+                # that the displayname is correctly set by the master server
+                'display_name': display_name,
+                'username': localpart,
+                'password': params.get("password"),
+                'bind_email': params.get("bind_email"),
+                'bind_msisdn': params.get("bind_msisdn"),
+                'device_id': params.get("device_id"),
+                'initial_device_display_name': params.get("initial_device_display_name"),
+                'inhibit_login': False,
+                'access_token': as_token,
+            }
+        )
+
     @defer.inlineCallbacks
     def _generate_user_id(self, reseed=False):
         if reseed or self._next_generated_user_id is None:
@@ -531,6 +639,8 @@ class RegistrationHandler(BaseHandler):
             A tuple of (user_id, access_token).
         Raises:
             RegistrationError if there was a problem registering.
+
+        NB this is only used in tests. TODO: move it to the test package!
         """
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
@@ -554,18 +664,16 @@ class RegistrationHandler(BaseHandler):
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
-                create_profile_with_displayname=user.localpart,
+                create_profile_with_displayname=displayname or user.localpart,
             )
+            if displayname is not None:
+                yield self.profile_handler.set_displayname(
+                    user, None, displayname or user.localpart, by_admin=True,
+                )
         else:
             yield self._auth_handler.delete_access_tokens_for_user(user_id)
             yield self.store.add_access_token_to_user(user_id=user_id, token=token)

-        if displayname is not None:
-            logger.info("setting user display name: %s -> %s", user_id, displayname)
-            yield self.profile_handler.set_displayname(
-                user, requester, displayname, by_admin=True,
-            )
-
         defer.returnValue((user_id, token))

     @defer.inlineCallbacks
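shadow_register assumes only two keys in the shadow_server config block, per the .get() calls above; a sketch of the expected shape (the URL and token are invented placeholders):

shadow_server = {
    "hs_url": "https://shadow.example.com",
    "as_token": "<application service token for the shadow deployment>",
}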
synapse/handlers/room.py
@@ -27,7 +27,7 @@ from twisted.internet import defer

 from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
 from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
-from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
@@ -49,12 +49,14 @@ class RoomCreationHandler(BaseHandler):
             "history_visibility": "shared",
             "original_invitees_have_ops": False,
             "guest_can_join": True,
+            "encryption_alg": "m.megolm.v1.aes-sha2",
         },
         RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
             "join_rules": JoinRules.INVITE,
             "history_visibility": "shared",
             "original_invitees_have_ops": True,
             "guest_can_join": True,
+            "encryption_alg": "m.megolm.v1.aes-sha2",
         },
         RoomCreationPreset.PUBLIC_CHAT: {
             "join_rules": JoinRules.PUBLIC,
@@ -70,10 +72,15 @@ class RoomCreationHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
+        self.config = hs.config

         # linearizer to stop two upgrades happening at once
         self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")

+        self._server_notices_mxid = hs.config.server_notices_mxid
+
+        self.third_party_event_rules = hs.get_third_party_event_rules()
+
     @defer.inlineCallbacks
     def upgrade_room(self, requester, old_room_id, new_version):
         """Replace a room with a new room with a different version
@@ -247,7 +254,22 @@ class RoomCreationHandler(BaseHandler):
         """
         user_id = requester.user.to_string()

-        if not self.spam_checker.user_may_create_room(user_id):
+        if (self._server_notices_mxid is not None and
+                requester.user.to_string() == self._server_notices_mxid):
+            # allow the server notices mxid to create rooms
+            is_requester_admin = True
+
+        else:
+            is_requester_admin = yield self.auth.is_server_admin(
+                requester.user,
+            )
+
+        if not is_requester_admin and not self.spam_checker.user_may_create_room(
+            user_id,
+            invite_list=[],
+            third_party_invite_list=[],
+            cloning=True,
+        ):
             raise SynapseError(403, "You are not permitted to create rooms")

         creation_content = {
@@ -469,13 +491,42 @@ class RoomCreationHandler(BaseHandler):

         yield self.auth.check_auth_blocking(user_id)

-        if not self.spam_checker.user_may_create_room(user_id):
+        if (self._server_notices_mxid is not None and
+                requester.user.to_string() == self._server_notices_mxid):
+            # allow the server notices mxid to create rooms
+            is_requester_admin = True
+        else:
+            is_requester_admin = yield self.auth.is_server_admin(
+                requester.user,
+            )
+
+        # Check whether the third party rules allows/changes the room create
+        # request.
+        yield self.third_party_event_rules.on_create_room(
+            requester,
+            config,
+            is_requester_admin=is_requester_admin,
+        )
+
+        invite_list = config.get("invite", [])
+        invite_3pid_list = config.get("invite_3pid", [])
+
+        if not is_requester_admin and not self.spam_checker.user_may_create_room(
+            user_id,
+            invite_list=invite_list,
+            third_party_invite_list=invite_3pid_list,
+            cloning=False,
+        ):
             raise SynapseError(403, "You are not permitted to create rooms")

         if ratelimit:
             yield self.ratelimit(requester)

-        room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier)
+        room_version = config.get(
+            "room_version",
+            self.config.default_room_version.identifier,
+        )

         if not isinstance(room_version, string_types):
             raise SynapseError(
                 400,
@@ -512,7 +563,6 @@ class RoomCreationHandler(BaseHandler):
         else:
             room_alias = None

-        invite_list = config.get("invite", [])
         for i in invite_list:
             try:
                 UserID.from_string(i)
@@ -523,8 +573,6 @@ class RoomCreationHandler(BaseHandler):
                 requester,
             )

-        invite_3pid_list = config.get("invite_3pid", [])
-
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
@@ -610,6 +658,7 @@ class RoomCreationHandler(BaseHandler):
                 "invite",
                 ratelimit=False,
                 content=content,
+                new_room=True,
             )

         for invite_3pid in invite_3pid_list:
@@ -624,6 +673,7 @@ class RoomCreationHandler(BaseHandler):
                 id_server,
                 requester,
                 txn_id=None,
+                new_room=True,
             )

         result = {"room_id": room_id}
@@ -694,6 +744,7 @@ class RoomCreationHandler(BaseHandler):
             "join",
             ratelimit=False,
             content=creator_join_profile,
+            new_room=True,
         )

         # We treat the power levels override specially as this needs to be one
@@ -769,6 +820,15 @@ class RoomCreationHandler(BaseHandler):
                 content=content,
             )

+        if "encryption_alg" in config:
+            yield send(
+                etype=EventTypes.Encryption,
+                state_key="",
+                content={
+                    'algorithm': config["encryption_alg"],
+                }
+            )
+
     @defer.inlineCallbacks
     def _generate_room_id(self, creator_id, is_public):
         # autogen room IDs and try to create it. We may clash, so just
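The net effect of the preset changes plus the encryption_alg hunk at the end of the file: rooms created from the private-chat presets now finish creation with an encryption state event. A sketch of the resulting event, shown for illustration only:

encryption_event = {
    "type": "m.room.encryption",  # EventTypes.Encryption
    "state_key": "",
    "content": {"algorithm": "m.megolm.v1.aes-sha2"},
}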
synapse/handlers/room_member.py
@@ -20,16 +20,12 @@ import logging

 from six.moves import http_client

-from signedjson.key import decode_verify_key_bytes
-from signedjson.sign import verify_signed_json
-from unpaddedbase64 import decode_base64
-
 from twisted.internet import defer

 import synapse.server
 import synapse.types
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.api.errors import AuthError, Codes, ProxiedRequestError, SynapseError
 from synapse.types import RoomID, UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room, user_left_room
@@ -67,12 +63,15 @@ class RoomMemberHandler(object):
         self.registration_handler = hs.get_registration_handler()
         self.profile_handler = hs.get_profile_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.identity_handler = hs.get_handlers().identity_handler

         self.member_linearizer = Linearizer(name="member")

         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
         self.third_party_event_rules = hs.get_third_party_event_rules()
         self._server_notices_mxid = self.config.server_notices_mxid
+        self.rewrite_identity_server_urls = self.config.rewrite_identity_server_urls
         self._enable_lookup = hs.config.enable_3pid_lookup
         self.allow_per_room_profiles = self.config.allow_per_room_profiles
@@ -317,8 +316,31 @@ class RoomMemberHandler(object):
             third_party_signed=None,
             ratelimit=True,
             content=None,
+            new_room=False,
             require_consent=True,
     ):
+        """Update a user's membership in a room
+
+        Args:
+            requester (Requester)
+            target (UserID)
+            room_id (str)
+            action (str): The "action" the requester is performing against the
+                target. One of join/leave/kick/ban/invite/unban.
+            txn_id (str|None): The transaction ID associated with the request,
+                or None if not provided.
+            remote_room_hosts (list[str]|None): List of remote servers to try
+                and join via if server isn't already in the room.
+            third_party_signed (dict|None): The signed object for third party
+                invites.
+            ratelimit (bool): Whether to apply ratelimiting to this request.
+            content (dict|None): Fields to include in the new event's content.
+            new_room (bool): Whether these membership changes are happening
+                as part of a room creation (e.g. initial joins and invites)
+
+        Returns:
+            Deferred[FrozenEvent]
+        """
         key = (room_id,)

         with (yield self.member_linearizer.queue(key)):
@@ -332,6 +354,7 @@ class RoomMemberHandler(object):
                 third_party_signed=third_party_signed,
                 ratelimit=ratelimit,
                 content=content,
+                new_room=new_room,
                 require_consent=require_consent,
             )
@@ -349,6 +372,7 @@ class RoomMemberHandler(object):
             third_party_signed=None,
             ratelimit=True,
             content=None,
+            new_room=False,
             require_consent=True,
     ):
         content_specified = bool(content)
@@ -416,8 +440,14 @@ class RoomMemberHandler(object):
                     )
                     block_invite = True

+                is_published = yield self.store.is_room_published(room_id)
+
                 if not self.spam_checker.user_may_invite(
-                    requester.user.to_string(), target.to_string(), room_id,
+                    requester.user.to_string(), target.to_string(),
+                    third_party_invite=None,
+                    room_id=room_id,
+                    new_room=new_room,
+                    published_room=is_published,
                 ):
                     logger.info("Blocking invite due to spam checker")
                     block_invite = True
@@ -496,8 +526,29 @@ class RoomMemberHandler(object):
                     # so don't really fit into the general auth process.
                     raise AuthError(403, "Guest access not allowed")

+            if (self._server_notices_mxid is not None and
+                    requester.user.to_string() == self._server_notices_mxid):
+                # allow the server notices mxid to join rooms
+                is_requester_admin = True
+
+            else:
+                is_requester_admin = yield self.auth.is_server_admin(
+                    requester.user,
+                )
+
+            inviter = yield self._get_inviter(target.to_string(), room_id)
+            if not is_requester_admin:
+                # We assume that if the spam checker allowed the user to create
+                # a room then they're allowed to join it.
+                if not new_room and not self.spam_checker.user_may_join_room(
+                    target.to_string(), room_id,
+                    is_invited=inviter is not None,
+                ):
+                    raise SynapseError(
+                        403, "Not allowed to join this room",
+                    )
+
             if not is_host_in_room:
-                inviter = yield self._get_inviter(target.to_string(), room_id)
                 if inviter and not self.hs.is_mine(inviter):
                     remote_room_hosts.append(inviter.domain)
@@ -707,7 +758,8 @@ class RoomMemberHandler(object):
             address,
             id_server,
             requester,
-            txn_id
+            txn_id,
+            new_room=False,
     ):
         if self.config.block_non_admin_invites:
             is_requester_admin = yield self.auth.is_server_admin(
@@ -723,10 +775,36 @@ class RoomMemberHandler(object):
         # can't just rely on the standard ratelimiting of events.
         yield self.base_handler.ratelimit(requester)

+        can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
+            medium, address, room_id,
+        )
+        if not can_invite:
+            raise SynapseError(
+                403, "This third-party identifier can not be invited in this room",
+                Codes.FORBIDDEN,
+            )
+
         invitee = yield self._lookup_3pid(
             id_server, medium, address
         )

+        is_published = yield self.store.is_room_published(room_id)
+
+        if not self.spam_checker.user_may_invite(
+            requester.user.to_string(), invitee,
+            third_party_invite={
+                "medium": medium,
+                "address": address,
+            },
+            room_id=room_id,
+            new_room=new_room,
+            published_room=is_published,
+        ):
+            logger.info("Blocking invite due to spam checker")
+            raise SynapseError(
+                403, "Invites have been disabled on this server",
+            )
+
         if invitee:
             yield self.update_membership(
                 requester,
@@ -746,6 +824,20 @@ class RoomMemberHandler(object):
                 txn_id=txn_id
             )

+    def _get_id_server_target(self, id_server):
+        """Looks up an id_server's actual http endpoint
+
+        Args:
+            id_server (str): the server name to look up.
+
+        Returns:
+            the http endpoint to connect to.
+        """
+        if id_server in self.rewrite_identity_server_urls:
+            return self.rewrite_identity_server_urls[id_server]
+
+        return id_server
+
     @defer.inlineCallbacks
     def _lookup_3pid(self, id_server, medium, address):
         """Looks up a 3pid in the passed identity server.
@@ -759,48 +851,13 @@ class RoomMemberHandler(object):
         Returns:
             str: the matrix ID of the 3pid, or None if it is not recognized.
         """
-        if not self._enable_lookup:
-            raise SynapseError(
-                403, "Looking up third-party identifiers is denied from this server",
-            )
         try:
-            data = yield self.simple_http_client.get_json(
-                "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
-                {
-                    "medium": medium,
-                    "address": address,
-                }
-            )
-
-            if "mxid" in data:
-                if "signatures" not in data:
-                    raise AuthError(401, "No signatures on 3pid binding")
-                yield self._verify_any_signature(data, id_server)
-                defer.returnValue(data["mxid"])
-
-        except IOError as e:
+            data = yield self.identity_handler.lookup_3pid(id_server, medium, address)
+            defer.returnValue(data.get("mxid"))
+        except ProxiedRequestError as e:
             logger.warn("Error from identity server lookup: %s" % (e,))
             defer.returnValue(None)

-    @defer.inlineCallbacks
-    def _verify_any_signature(self, data, server_hostname):
-        if server_hostname not in data["signatures"]:
-            raise AuthError(401, "No signature from server %s" % (server_hostname,))
-        for key_name, signature in data["signatures"][server_hostname].items():
-            key_data = yield self.simple_http_client.get_json(
-                "%s%s/_matrix/identity/api/v1/pubkey/%s" %
-                (id_server_scheme, server_hostname, key_name,),
-            )
-            if "public_key" not in key_data:
-                raise AuthError(401, "No public key named %s from %s" %
-                                (key_name, server_hostname,))
-            verify_signed_json(
-                data,
-                server_hostname,
-                decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
-            )
-            return
-
     @defer.inlineCallbacks
     def _make_and_store_3pid_invite(
         self,
@@ -878,6 +935,7 @@ class RoomMemberHandler(object):
                 "sender": user.to_string(),
                 "state_key": token,
             },
+            ratelimit=False,
             txn_id=txn_id,
         )
@@ -926,8 +984,9 @@ class RoomMemberHandler(object):
         user.
         """

+        target = self._get_id_server_target(id_server)
         is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
-            id_server_scheme, id_server,
+            id_server_scheme, target,
         )

         invite_config = {
@@ -967,7 +1026,7 @@ class RoomMemberHandler(object):
             fallback_public_key = {
                 "public_key": data["public_key"],
                 "key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
-                    id_server_scheme, id_server,
+                    id_server_scheme, target,
                 ),
             }
         else:
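Taken together, the spam-checker call sites in this branch (room creation, joins, invites and 3pid invites) imply an extended callback interface. The sketch below is inferred from those call sites and is not itself part of the diff; a real module can apply any policy it likes:

class ExampleSpamChecker(object):
    def user_may_create_room(self, user_id, invite_list,
                             third_party_invite_list, cloning):
        return True

    def user_may_join_room(self, user_id, room_id, is_invited):
        return True

    def user_may_invite(self, inviter_userid, invitee,
                        third_party_invite, room_id, new_room, published_room):
        # e.g. allow everything except 3pid invites into published rooms
        return not (third_party_invite and published_room)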
synapse/handlers/set_password.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,9 +30,12 @@ class SetPasswordHandler(BaseHandler):
         super(SetPasswordHandler, self).__init__(hs)
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
+        self._password_policy_handler = hs.get_password_policy_handler()

     @defer.inlineCallbacks
     def set_password(self, user_id, newpassword, requester=None):
+        self._password_policy_handler.validate_password(newpassword)
+
         password_hash = yield self._auth_handler.hash(newpassword)

         except_device_id = requester.device_id if requester else None
synapse/handlers/stats.py
@@ -115,6 +115,7 @@ class StatsHandler(StateDeltasHandler):
             event_id = delta["event_id"]
             stream_id = delta["stream_id"]
             prev_event_id = delta["prev_event_id"]
+            stream_pos = delta["stream_id"]

             logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
@@ -136,10 +137,15 @@ class StatsHandler(StateDeltasHandler):
             event_content = {}

             if event_id is not None:
-                event_content = (yield self.store.get_event(event_id)).content or {}
+                event = yield self.store.get_event(event_id, allow_none=True)
+                if event:
+                    event_content = event.content or {}
+
+            # We use stream_pos here rather than fetch by event_id as event_id
+            # may be None
+            now = yield self.store.get_received_ts_by_stream_pos(stream_pos)

             # quantise time to the nearest bucket
-            now = yield self.store.get_received_ts(event_id)
             now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
@@ -149,9 +155,11 @@ class StatsHandler(StateDeltasHandler):
                 # compare them.
                 prev_event_content = {}
                 if prev_event_id is not None:
-                    prev_event_content = (
-                        yield self.store.get_event(prev_event_id)
-                    ).content
+                    prev_event = yield self.store.get_event(
+                        prev_event_id, allow_none=True,
+                    )
+                    if prev_event:
+                        prev_event_content = prev_event.content

                 membership = event_content.get("membership", Membership.LEAVE)
                 prev_membership = prev_event_content.get("membership", Membership.LEAVE)
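The bucket quantisation used above, restated as a standalone helper (a sketch; stats_bucket_size is in seconds while the input timestamp is in milliseconds, so the result is in seconds):

def quantise_to_bucket(ts_ms, bucket_size_s):
    # Floor a millisecond timestamp to the start of its stats bucket, as in
    # `(now // 1000 // self.stats_bucket_size) * self.stats_bucket_size`.
    return (ts_ms // 1000 // bucket_size_s) * bucket_size_s


assert quantise_to_bucket(1560000123456, 3600) == 1559998800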
Some files were not shown because too many files have changed in this diff.