Compare commits
451 Commits
v1.30.0
...
[Commit table omitted: the extracted "Author | SHA1 | Date" table carried 451 rows, but only abbreviated commit SHAs survived extraction; author, date, and message data were lost.]
.git-blame-ignore-revs (deleted)
@@ -1,8 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3

# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56

# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
.gitignore (3 changes, vendored)
@@ -6,14 +6,13 @@
*.egg
*.egg-info
*.lock
*.py[cod]
*.pyc
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/

# stuff that is likely to exist when you run a server locally
/*.db
CHANGES.md (145 changes)
@@ -1,150 +1,9 @@
Synapse 1.30.0 (2021-03-22)
===========================

Note that this release deprecates the ability for appservices to
call `POST /_matrix/client/r0/register` without the body parameter `type`. Appservice
developers should use a `type` value of `m.login.application_service` as
per [the spec](https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions).
In future releases, calling this endpoint with an access token - but without a `m.login.application_service`
type - will fail.


No significant changes.


Synapse 1.30.0rc1 (2021-03-16)
==============================

Features
--------

- Add prometheus metrics for number of users successfully registering and logging in. ([\#9510](https://github.com/matrix-org/synapse/issues/9510), [\#9511](https://github.com/matrix-org/synapse/issues/9511), [\#9573](https://github.com/matrix-org/synapse/issues/9573))
- Add `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time` prometheus metrics, which monitor federation delays by reporting the timestamps of messages sent and received to a set of remote servers. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
- Add support for generating JSON Web Tokens dynamically for use as OIDC client secrets. ([\#9549](https://github.com/matrix-org/synapse/issues/9549))
- Optimise handling of incomplete room history for incoming federation. ([\#9601](https://github.com/matrix-org/synapse/issues/9601))
- Finalise support for allowing clients to pick an SSO Identity Provider ([MSC2858](https://github.com/matrix-org/matrix-doc/pull/2858)). ([\#9617](https://github.com/matrix-org/synapse/issues/9617))
- Tell spam checker modules about the SSO IdP a user registered through if one was used. ([\#9626](https://github.com/matrix-org/synapse/issues/9626))


Bugfixes
--------

- Fix long-standing bug when generating thumbnails for some images with transparency: `TypeError: cannot unpack non-iterable int object`. ([\#9473](https://github.com/matrix-org/synapse/issues/9473))
- Purge chain cover indexes for events that were purged prior to Synapse v1.29.0. ([\#9542](https://github.com/matrix-org/synapse/issues/9542), [\#9583](https://github.com/matrix-org/synapse/issues/9583))
- Fix bug where federation requests were not correctly retried on 5xx responses. ([\#9567](https://github.com/matrix-org/synapse/issues/9567))
- Fix re-activating an account via the admin API when local passwords are disabled. ([\#9587](https://github.com/matrix-org/synapse/issues/9587))
- Fix a bug introduced in Synapse 1.20 which caused incoming federation transactions to stack up, causing slow recovery from outages. ([\#9597](https://github.com/matrix-org/synapse/issues/9597))
- Fix a bug introduced in v1.28.0 where the OpenID Connect callback endpoint could error with a `MacaroonInitException`. ([\#9620](https://github.com/matrix-org/synapse/issues/9620))
- Fix Internal Server Error on `GET /_synapse/client/saml2/authn_response` request. ([\#9623](https://github.com/matrix-org/synapse/issues/9623))


Updates to the Docker image
---------------------------

- Make use of an improved malloc implementation (`jemalloc`) in the docker image. ([\#8553](https://github.com/matrix-org/synapse/issues/8553))


Improved Documentation
----------------------

- Add relayd entry to reverse proxy example configurations. ([\#9508](https://github.com/matrix-org/synapse/issues/9508))
- Improve the SAML2 upgrade notes for 1.27.0. ([\#9550](https://github.com/matrix-org/synapse/issues/9550))
- Link to the "List user's media" admin API from the media admin API docs. ([\#9571](https://github.com/matrix-org/synapse/issues/9571))
- Clarify the spam checker modules documentation example to mention that `parse_config` is a required method. ([\#9580](https://github.com/matrix-org/synapse/issues/9580))
- Clarify the sample configuration for `stats` settings. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))


Deprecations and Removals
-------------------------

- The `synapse_federation_last_sent_pdu_age` and `synapse_federation_last_received_pdu_age` prometheus metrics have been removed. They are replaced by `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time`. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
- Registering an Application Service user without using the `m.login.application_service` login type will be unsupported in an upcoming Synapse release. ([\#9559](https://github.com/matrix-org/synapse/issues/9559))


Internal Changes
----------------

- Add tests to ResponseCache. ([\#9458](https://github.com/matrix-org/synapse/issues/9458))
- Add type hints to purge room and server notice admin API. ([\#9520](https://github.com/matrix-org/synapse/issues/9520))
- Add extra logging to ObservableDeferred when callbacks throw exceptions. ([\#9523](https://github.com/matrix-org/synapse/issues/9523))
- Fix incorrect type hints. ([\#9528](https://github.com/matrix-org/synapse/issues/9528), [\#9543](https://github.com/matrix-org/synapse/issues/9543), [\#9591](https://github.com/matrix-org/synapse/issues/9591), [\#9608](https://github.com/matrix-org/synapse/issues/9608), [\#9618](https://github.com/matrix-org/synapse/issues/9618))
- Add an additional test for purging a room. ([\#9541](https://github.com/matrix-org/synapse/issues/9541))
- Add a `.git-blame-ignore-revs` file with the hashes of auto-formatting. ([\#9560](https://github.com/matrix-org/synapse/issues/9560))
- Increase the threshold before which outbound federation to a server goes into "catch up" mode, which is expensive for the remote server to handle. ([\#9561](https://github.com/matrix-org/synapse/issues/9561))
- Fix spurious errors reported by the `config-lint.sh` script. ([\#9562](https://github.com/matrix-org/synapse/issues/9562))
- Fix type hints and tests for BlacklistingAgentWrapper and BlacklistingReactorWrapper. ([\#9563](https://github.com/matrix-org/synapse/issues/9563))
- Do not have mypy ignore type hints from unpaddedbase64. ([\#9568](https://github.com/matrix-org/synapse/issues/9568))
- Improve efficiency of calculating the auth chain in large rooms. ([\#9576](https://github.com/matrix-org/synapse/issues/9576))
- Convert `synapse.types.Requester` to an `attrs` class. ([\#9586](https://github.com/matrix-org/synapse/issues/9586))
- Add logging for redis connection setup. ([\#9590](https://github.com/matrix-org/synapse/issues/9590))
- Improve logging when processing incoming transactions. ([\#9596](https://github.com/matrix-org/synapse/issues/9596))
- Remove unused `stats.retention` setting, and emit a warning if stats are disabled. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
- Prevent attempting to bundle aggregations for state events in /context APIs. ([\#9619](https://github.com/matrix-org/synapse/issues/9619))


Synapse 1.29.0 (2021-03-08)
===========================
Synapse 1.xx.0
==============

Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.


No significant changes.


Synapse 1.29.0rc1 (2021-03-04)
==============================

Features
--------

- Add rate limiters to cross-user key sharing requests. ([\#8957](https://github.com/matrix-org/synapse/issues/8957))
- Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel. ([\#8978](https://github.com/matrix-org/synapse/issues/8978))
- Add some configuration settings to make users' profile data more private. ([\#9203](https://github.com/matrix-org/synapse/issues/9203))
- The `no_proxy` and `NO_PROXY` environment variables are now respected in proxied HTTP clients with the lowercase form taking precedence if both are present. Additionally, the lowercase `https_proxy` environment variable is now respected in proxied HTTP clients on top of existing support for the uppercase `HTTPS_PROXY` form and takes precedence if both are present. Contributed by Timothy Leung. ([\#9372](https://github.com/matrix-org/synapse/issues/9372))
- Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users. ([\#9383](https://github.com/matrix-org/synapse/issues/9383), [\#9385](https://github.com/matrix-org/synapse/issues/9385))
- Add support for regenerating thumbnails if they have been deleted but the original image is still stored. ([\#9438](https://github.com/matrix-org/synapse/issues/9438))
- Add support for `X-Forwarded-Proto` header when using a reverse proxy. ([\#9472](https://github.com/matrix-org/synapse/issues/9472), [\#9501](https://github.com/matrix-org/synapse/issues/9501), [\#9512](https://github.com/matrix-org/synapse/issues/9512), [\#9539](https://github.com/matrix-org/synapse/issues/9539))


Bugfixes
--------

- Fix a bug where users' pushers were not all deleted when they deactivated their account. ([\#9285](https://github.com/matrix-org/synapse/issues/9285), [\#9516](https://github.com/matrix-org/synapse/issues/9516))
- Fix a bug where a lot of unnecessary presence updates were sent when joining a room. ([\#9402](https://github.com/matrix-org/synapse/issues/9402))
- Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results. ([\#9416](https://github.com/matrix-org/synapse/issues/9416))
- Fix a bug in single sign-on which could cause a "No session cookie found" error. ([\#9436](https://github.com/matrix-org/synapse/issues/9436))
- Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined. ([\#9440](https://github.com/matrix-org/synapse/issues/9440))
- Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`. ([\#9449](https://github.com/matrix-org/synapse/issues/9449))
- Fix deleting pushers when using sharded pushers. ([\#9465](https://github.com/matrix-org/synapse/issues/9465), [\#9466](https://github.com/matrix-org/synapse/issues/9466), [\#9479](https://github.com/matrix-org/synapse/issues/9479), [\#9536](https://github.com/matrix-org/synapse/issues/9536))
- Fix missing startup checks for the consistency of certain PostgreSQL sequences. ([\#9470](https://github.com/matrix-org/synapse/issues/9470))
- Fix a long-standing bug where the media repository could leak file descriptors while previewing media. ([\#9497](https://github.com/matrix-org/synapse/issues/9497))
- Properly purge the event chain cover index when purging history. ([\#9498](https://github.com/matrix-org/synapse/issues/9498))
- Fix missing chain cover index due to a schema delta not being applied correctly. Only affected servers that ran development versions. ([\#9503](https://github.com/matrix-org/synapse/issues/9503))
- Fix a bug introduced in v1.25.0 where `/_synapse/admin/join/` would fail when given a room alias. ([\#9506](https://github.com/matrix-org/synapse/issues/9506))
- Prevent presence background jobs from running when presence is disabled. ([\#9530](https://github.com/matrix-org/synapse/issues/9530))
- Fix rare edge case that caused a background update to fail if the server had rejected an event that had duplicate auth events. ([\#9537](https://github.com/matrix-org/synapse/issues/9537))


Improved Documentation
----------------------

- Update the example systemd config to propagate reloads to individual units. ([\#9463](https://github.com/matrix-org/synapse/issues/9463))


Internal Changes
----------------

- Add documentation and type hints to `parse_duration`. ([\#9432](https://github.com/matrix-org/synapse/issues/9432))
- Remove vestiges of `uploads_path` configuration setting. ([\#9462](https://github.com/matrix-org/synapse/issues/9462))
- Add a comment about systemd-python. ([\#9464](https://github.com/matrix-org/synapse/issues/9464))
- Test that we require validated email for email pushers. ([\#9496](https://github.com/matrix-org/synapse/issues/9496))
- Allow python to generate bytecode for synapse. ([\#9502](https://github.com/matrix-org/synapse/issues/9502))
- Fix incorrect type hints. ([\#9515](https://github.com/matrix-org/synapse/issues/9515), [\#9518](https://github.com/matrix-org/synapse/issues/9518))
- Add type hints to device and event report admin API. ([\#9519](https://github.com/matrix-org/synapse/issues/9519))
- Add type hints to user admin API. ([\#9521](https://github.com/matrix-org/synapse/issues/9521))
- Bump the versions of mypy and mypy-zope used for static type checking. ([\#9529](https://github.com/matrix-org/synapse/issues/9529))


Synapse 1.28.0 (2021-02-25)
===========================
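As a side note on the deprecation called out above: the spec-compliant registration call already has a concrete shape. The following is a hedged sketch of it (the server name, localpart, and token are placeholders; the endpoint and `type` value come from the application-service spec linked in the changelog):

```python
import json
import urllib.request

# Appservice registration must now carry type "m.login.application_service"
# in the body, authenticated with the appservice's as_token.
body = json.dumps({
    "type": "m.login.application_service",
    "username": "_example_bridge_user",  # illustrative localpart
}).encode()

req = urllib.request.Request(
    "https://example.com/_matrix/client/r0/register",  # placeholder homeserver
    data=body,
    headers={
        "Authorization": "Bearer <as_token>",
        "Content-Type": "application/json",
    },
)
urllib.request.urlopen(req)
```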
MANIFEST.in
@@ -20,10 +20,9 @@ recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key

recursive-include synapse/res *
recursive-include synapse/static *.css
README.rst
@@ -183,9 +183,8 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
UPGRADE.rst (13 changes)
@@ -98,9 +98,9 @@ will log a warning on each received request.

To avoid the warning, administrators using a reverse proxy should ensure that
the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
indicate the protocol used by the client. See the `reverse proxy documentation
<docs/reverse_proxy.md>`_, where the example configurations have been updated to
show how to set this header.
indicate the protocol used by the client. See the [reverse proxy
documentation](docs/reverse_proxy.md), where the example configurations have
been updated to show how to set this header.

(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
sets `X-Forwarded-Proto` by default.)
@@ -124,13 +124,6 @@ This version changes the URI used for callbacks from OAuth2 and SAML2 identity p
need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
"ACS location" (also known as "allowed callback URLs") at the identity provider.

The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
provider uses this property to validate or otherwise identify Synapse, its configuration
will need to be updated to use the new URL. Alternatively you could create a new, separate
"EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
the existing "EntityDescriptor" as they were.

Changes to HTML templates
-------------------------
changelog.d/8675.misc (1 change, new file)
@@ -0,0 +1 @@
Temporarily drop cross-user m.room_key_request to_device messages over performance concerns.

changelog.d/8957.feature (1 change, new file)
@@ -0,0 +1 @@
Add rate limiters to cross-user key sharing requests.

changelog.d/8978.feature (1 change, new file)
@@ -0,0 +1 @@
Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel.

changelog.d/9203.feature (1 change, new file)
@@ -0,0 +1 @@
Add some configuration settings to make users' profile data more private.

changelog.d/9285.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix a bug where users' pushers were not all deleted when they deactivated their account.

changelog.d/9358.misc (1 change, new file)
@@ -0,0 +1 @@
Added a fix that invalidates cache for empty timed-out sync responses.

changelog.d/9383.feature (1 change, new file)
@@ -0,0 +1 @@
Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.

changelog.d/9385.feature (1 change, new file)
@@ -0,0 +1 @@
Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.

changelog.d/9402.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix a bug where a lot of unnecessary presence updates were sent when joining a room.

changelog.d/9416.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results.

changelog.d/9432.misc (1 change, new file)
@@ -0,0 +1 @@
Add documentation and type hints to `parse_duration`.

changelog.d/9436.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix a bug in single sign-on which could cause a "No session cookie found" error.

changelog.d/9438.feature (1 change, new file)
@@ -0,0 +1 @@
Add support for regenerating thumbnails if they have been deleted but the original image is still stored.

changelog.d/9440.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined.

changelog.d/9449.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`.

changelog.d/9462.misc (1 change, new file)
@@ -0,0 +1 @@
Remove vestiges of `uploads_path` configuration setting.

changelog.d/9463.doc (1 change, new file)
@@ -0,0 +1 @@
Update the example systemd config to propagate reloads to individual units.

changelog.d/9464.misc (1 change, new file)
@@ -0,0 +1 @@
Add a comment about systemd-python.

changelog.d/9465.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix deleting pushers when using sharded pushers.

changelog.d/9466.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix deleting pushers when using sharded pushers.

changelog.d/9470.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix missing startup checks for the consistency of certain PostgreSQL sequences.

changelog.d/9472.feature (1 change, new file)
@@ -0,0 +1 @@
Add support for `X-Forwarded-Proto` header when using a reverse proxy.

changelog.d/9479.bugfix (1 change, new file)
@@ -0,0 +1 @@
Fix deleting pushers when using sharded pushers.

changelog.d/9496.misc (1 change, new file)
@@ -0,0 +1 @@
Test that we require validated email for email pushers.

changelog.d/9501.feature (1 change, new file)
@@ -0,0 +1 @@
Add support for `X-Forwarded-Proto` header when using a reverse proxy.
debian/build_virtualenv (6 changes, vendored)
@@ -58,10 +58,10 @@ trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"

PYTHONPATH="$tmpdir" \
    "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests

# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
        --config-dir="/etc/matrix-synapse" \
        --data-dir="/var/lib/matrix-synapse" |
    perl -pe '
@@ -87,7 +87,7 @@ PYTHONPATH="$tmpdir" \
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"

# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
        --output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"

# add a dependency on the right version of python to substvars.
debian/changelog (16 changes, vendored)
@@ -1,19 +1,3 @@
matrix-synapse-py3 (1.30.0) stable; urgency=medium

  * New synapse release 1.30.0.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 22 Mar 2021 13:15:34 +0000

matrix-synapse-py3 (1.29.0) stable; urgency=medium

  [ Jonathan de Jong ]
  * Remove the python -B flag (don't generate bytecode) in scripts and documentation.

  [ Synapse Packaging team ]
  * New synapse release 1.29.0.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 08 Mar 2021 13:51:50 +0000

matrix-synapse-py3 (1.28.0) stable; urgency=medium

  * New synapse release 1.28.0.
debian/synctl.1 (2 changes, vendored)
@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
.
.nf

$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.
debian/synctl.ronn (2 changes, vendored)
@@ -41,7 +41,7 @@ process.

Configuration file may be generated as follows:

    $ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
    $ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>

## ENVIRONMENT
docker/Dockerfile
@@ -69,7 +69,6 @@ RUN apt-get update && apt-get install -y \
    libpq5 \
    libwebp6 \
    xmlsec1 \
    libjemalloc2 \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /install /usr/local
docker/README.md
@@ -204,8 +204,3 @@ healthcheck:
    timeout: 10s
    retries: 3
```

## Using jemalloc

Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse [README](../README.md)
docker/start.py
@@ -3,7 +3,6 @@
import codecs
import glob
import os
import platform
import subprocess
import sys
@@ -214,13 +213,6 @@ def main(args, environ):
    if "-m" not in args:
        args = ["-m", synapse_worker] + args

    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
    else:
        log("Could not find %s, will not use" % (jemallocpath,))

    # if there are no config files passed to synapse, try adding the default file
    if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
        config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
@@ -256,9 +248,9 @@ running with 'migrate_config'. See the README for more details.
    args = ["python"] + args
    if ownership is not None:
        args = ["gosu", ownership] + args
        os.execve("/usr/sbin/gosu", args, environ)
        os.execv("/usr/sbin/gosu", args)
    else:
        os.execve("/usr/local/bin/python", args, environ)
        os.execv("/usr/local/bin/python", args)


if __name__ == "__main__":
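A note on the `os.execv`/`os.execve` change above: `execve` takes an explicit environment mapping, which is what lets the `LD_PRELOAD` value set on the local `environ` dict reach the replacement process. A minimal sketch of the difference (paths are illustrative):

```python
import os

# execv hands the child the current os.environ as-is; execve replaces it with
# the mapping we pass, so an LD_PRELOAD entry added to a local dict survives
# into the new process.
environ = dict(os.environ, LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2")
os.execve("/usr/local/bin/python", ["python", "-m", "synapse.app.homeserver"], environ)
```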
docs/admin_api/media_admin_api.md
@@ -1,7 +1,5 @@
# Contents
- [Querying media](#querying-media)
  * [List all media in a room](#list-all-media-in-a-room)
  * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [List all media in a room](#list-all-media-in-a-room)
- [Quarantine media](#quarantine-media)
  * [Quarantining media by ID](#quarantining-media-by-id)
  * [Quarantining media in a room](#quarantining-media-in-a-room)
@@ -12,11 +10,7 @@
  * [Delete local media by date or size](#delete-local-media-by-date-or-size)
- [Purge Remote Media API](#purge-remote-media-api)

# Querying media

These APIs allow extracting media information from the homeserver.

## List all media in a room
# List all media in a room

This API gets a list of known media in a room.
However, it only shows media from unencrypted events or rooms.
@@ -42,12 +36,6 @@ The API returns a JSON body like the following:
}
```

## List all media uploaded by a user

Listing all media that has been uploaded by a local user can be achieved through
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
Admin API.

# Quarantine media

Quarantining media means that it is marked as inaccessible by users. It applies
docs/openid.md
@@ -226,7 +226,7 @@ Synapse config:
oidc_providers:
  - idp_id: github
    idp_name: Github
    idp_brand: "github"  # optional: styling hint for clients
    idp_brand: "org.matrix.github"  # optional: styling hint for clients
    discover: false
    issuer: "https://github.com/"
    client_id: "your-client-id"  # TO BE FILLED
@@ -252,7 +252,7 @@ oidc_providers:
oidc_providers:
  - idp_id: google
    idp_name: Google
    idp_brand: "google"  # optional: styling hint for clients
    idp_brand: "org.matrix.google"  # optional: styling hint for clients
    issuer: "https://accounts.google.com/"
    client_id: "your-client-id"  # TO BE FILLED
    client_secret: "your-client-secret"  # TO BE FILLED
@@ -299,7 +299,7 @@ Synapse config:
oidc_providers:
  - idp_id: gitlab
    idp_name: Gitlab
    idp_brand: "gitlab"  # optional: styling hint for clients
    idp_brand: "org.matrix.gitlab"  # optional: styling hint for clients
    issuer: "https://gitlab.com/"
    client_id: "your-client-id"  # TO BE FILLED
    client_secret: "your-client-secret"  # TO BE FILLED
@@ -334,7 +334,7 @@ Synapse config:
```yaml
  - idp_id: facebook
    idp_name: Facebook
    idp_brand: "facebook"  # optional: styling hint for clients
    idp_brand: "org.matrix.facebook"  # optional: styling hint for clients
    discover: false
    issuer: "https://facebook.com"
    client_id: "your-client-id"  # TO BE FILLED
@@ -386,7 +386,7 @@ oidc_providers:
      config:
        subject_claim: "id"
        localpart_template: "{{ user.login }}"
        display_name_template: "{{ user.full_name }}"
        display_name_template: "{{ user.full_name }}"
```

### XWiki
@@ -401,7 +401,8 @@
    idp_name: "XWiki"
    issuer: "https://myxwikihost/xwiki/oidc/"
    client_id: "your-client-id"  # TO BE FILLED
    client_auth_method: none
    # Needed until https://github.com/matrix-org/synapse/issues/9212 is fixed
    client_secret: "dontcare"
    scopes: ["openid", "profile"]
    user_profile_method: "userinfo_endpoint"
    user_mapping_provider:
@@ -409,40 +410,3 @@ oidc_providers:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
```

## Apple

Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.

You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.

As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.

https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
documentation on setting up SiWA.

The synapse config will look like this:

```yaml
  - idp_id: apple
    idp_name: Apple
    issuer: "https://appleid.apple.com"
    client_id: "your-client-id"  # Set to the "identifier" for your "ServicesID"
    client_auth_method: "client_secret_post"
    client_secret_jwt_key:
      key_file: "/path/to/AuthKey_KEYIDCODE.p8"  # point to your key file
      jwt_header:
        alg: ES256
        kid: "KEYIDCODE"  # Set to the 10-char Key ID
      jwt_payload:
        iss: TEAMIDCODE  # Set to the 10-char Team ID
    scopes: ["name", "email", "openid"]
    authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
    user_mapping_provider:
      config:
        email_template: "{{ user.email }}"
```
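The `client_secret_jwt_key` block in the Apple example above is what the new "generate JSON Web Tokens dynamically for use as OIDC client secrets" feature automates. For comparison, a hedged PyJWT sketch of the equivalent manual step (the claim set follows Apple's documented requirements; the IDs and key path are the placeholders from the config above, so verify against Apple's docs before relying on it):

```python
import time

import jwt  # PyJWT

# Load the ES256 private key downloaded from the Apple developer console.
with open("/path/to/AuthKey_KEYIDCODE.p8") as f:
    key = f.read()

now = int(time.time())
client_secret = jwt.encode(
    {
        "iss": "TEAMIDCODE",                   # 10-char Team ID
        "sub": "your-client-id",               # the Services ID identifier
        "aud": "https://appleid.apple.com",
        "iat": now,
        "exp": now + 3600,                     # short-lived secret
    },
    key,
    algorithm="ES256",
    headers={"kid": "KEYIDCODE"},              # 10-char Key ID
)
```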
docs/reverse_proxy.md
@@ -3,9 +3,8 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
[HAProxy](https://www.haproxy.org/) or
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.
@@ -54,8 +53,6 @@ server {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host;

        # Nginx by default only allows file uploads up to 1M in size
        # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
        client_max_body_size 50M;
@@ -163,52 +160,6 @@ backend matrix
    server matrix 127.0.0.1:8008
```

### Relayd

```
table <webserver> { 127.0.0.1 }
table <matrixserver> { 127.0.0.1 }

http protocol "https" {
    tls { no tlsv1.0, ciphers "HIGH" }
    tls keypair "example.com"
    match header set "X-Forwarded-For" value "$REMOTE_ADDR"
    match header set "X-Forwarded-Proto" value "https"

    # set CORS header for .well-known/matrix/server, .well-known/matrix/client
    # httpd does not support setting headers, so do it here
    match request path "/.well-known/matrix/*" tag "matrix-cors"
    match response tagged "matrix-cors" header set "Access-Control-Allow-Origin" value "*"

    pass quick path "/_matrix/*" forward to <matrixserver>
    pass quick path "/_synapse/client/*" forward to <matrixserver>

    # pass on non-matrix traffic to webserver
    pass forward to <webserver>
}

relay "https_traffic" {
    listen on egress port 443 tls
    protocol "https"
    forward to <matrixserver> port 8008 check tcp
    forward to <webserver> port 8080 check tcp
}

http protocol "matrix" {
    tls { no tlsv1.0, ciphers "HIGH" }
    tls keypair "example.com"
    block
    pass quick path "/_matrix/*" forward to <matrixserver>
    pass quick path "/_synapse/client/*" forward to <matrixserver>
}

relay "matrix_federation" {
    listen on egress port 8448 tls
    protocol "matrix"
    forward to <matrixserver> port 8008 check tcp
}
```

## Homeserver Configuration

You will also want to set `bind_addresses: ['127.0.0.1']` and
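One way to sanity-check any of the proxy setups above is to hit an unauthenticated client-server endpoint through the proxy; a small sketch (the hostname is a placeholder for your server):

```python
import json
import urllib.request

# /_matrix/client/versions requires no authentication, so a JSON response
# here means the proxy is forwarding /_matrix traffic through to Synapse.
with urllib.request.urlopen("https://example.com/_matrix/client/versions") as resp:
    print(json.load(resp)["versions"])
```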
docs/sample_config.yaml
@@ -89,7 +89,8 @@ pid_file: DATADIR/homeserver.pid
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, unless allow_profile_lookup_over_federation is set to false.
# API, so this setting is of limited value if federation is enabled on
# the server.
#
#require_auth_for_profile_requests: true
@@ -1779,26 +1780,7 @@ saml2_config:
#
#   client_id: Required. oauth2 client id to use.
#
#   client_secret: oauth2 client secret to use. May be omitted if
#       client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
#   client_secret_jwt_key: Alternative to client_secret: details of a key used
#       to create a JSON Web Token to be used as an OAuth2 client secret. If
#       given, must be a dictionary with the following properties:
#
#       key: a pem-encoded signing key. Must be a suitable key for the
#           algorithm specified. Required unless 'key_file' is given.
#
#       key_file: the path to file containing a pem-encoded signing key file.
#           Required unless 'key' is given.
#
#       jwt_header: a dictionary giving properties to include in the JWT
#           header. Must include the key 'alg', giving the algorithm used to
#           sign the JWT, such as "ES256", using the JWA identifiers in
#           RFC7518.
#
#       jwt_payload: an optional dictionary giving properties to include in
#           the JWT payload. Normally this should include an 'iss' key.
#   client_secret: Required. oauth2 client secret to use.
#
#   client_auth_method: auth method to use when exchanging the token. Valid
#       values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -1919,7 +1901,7 @@ oidc_providers:
#
#- idp_id: github
#  idp_name: Github
#  idp_brand: github
#  idp_brand: org.matrix.github
#  discover: false
#  issuer: "https://github.com/"
#  client_id: "your-client-id" # TO BE FILLED
@@ -2645,20 +2627,19 @@ user_directory:


# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
# Local statistics collection. Used in populating the room directory.
#
stats:
  # Uncomment the following to disable room and user statistics. Note that doing
  # so may cause certain features (such as the room directory) not to work
  # correctly.
  #
  #enabled: false

  # The size of each timeslice in the room_stats_historical and
  # user_stats_historical tables, as a time period. Defaults to "1d".
  #
  #bucket_size: 1h
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
#   enabled: true
#   bucket_size: 1d
#   retention: 1y


# Server Notices room configuration
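The `bucket_size`/`retention` comments above lean on the "human readable short form" duration syntax, the same syntax handled by the `parse_duration` helper the changelog mentions. A hedged sketch of such a parser (the unit table and millisecond return value are assumptions in the spirit of that helper, not its actual source):

```python
def parse_duration(value: str) -> int:
    """Parse a short-form duration like "1d" or "1y" into milliseconds."""
    units = {
        "s": 1_000,
        "m": 60_000,
        "h": 3_600_000,
        "d": 86_400_000,
        "w": 604_800_000,
        "y": 31_536_000_000,
    }
    if value[-1].isdigit():
        # A bare number is treated as a count of milliseconds.
        return int(value)
    return int(value[:-1]) * units[value[-1]]

assert parse_duration("1h") == 3_600_000
assert parse_duration("500") == 500
```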
docs/spam_checker.md
@@ -14,7 +14,6 @@ The Python class is instantiated with two objects:
* An instance of `synapse.module_api.ModuleApi`.

It then implements methods which return a boolean to alter behavior in Synapse.
All the methods must be defined.

There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
@@ -25,7 +24,6 @@ well as some specific methods:
* `user_may_publish_room`
* `check_username_for_spam`
* `check_registration_for_spam`
* `check_media_file_for_spam`

The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
@@ -33,10 +31,6 @@ are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `ModuleApi` class provides a way for the custom spam checker class to
call back into the homeserver internals.

Additionally, a `parse_config` method is mandatory and receives the plugin config
dictionary. After parsing, It must return an object which will be
passed to `__init__` later.

### Example

```python
@@ -47,10 +41,6 @@ class ExampleSpamChecker:
        self.config = config
        self.api = api

    @staticmethod
    def parse_config(config):
        return config

    async def check_event_for_spam(self, foo):
        return False  # allow all events
@@ -69,13 +59,7 @@ class ExampleSpamChecker:
    async def check_username_for_spam(self, user_profile):
        return False  # allow all usernames

    async def check_registration_for_spam(
        self,
        email_threepid,
        username,
        request_info,
        auth_provider_id,
    ):
    async def check_registration_for_spam(self, email_threepid, username, request_info):
        return RegistrationBehaviour.ALLOW  # allow all registrations

    async def check_media_file_for_spam(self, file_wrapper, file_info):
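To make the `parse_config` contract described above concrete, here is a hedged sketch of the calling convention, using the `ExampleSpamChecker` from the documentation's own example (the loader shown is illustrative, not Synapse's actual module loader):

```python
class FakeModuleApi:
    """Stand-in for synapse.module_api.ModuleApi, just for illustration."""

# The homeserver first calls parse_config() on the raw dict taken from the
# YAML config, then constructs the checker with the parsed result plus the
# ModuleApi instance.
raw_config = {"block_all": False}                     # example plugin config
parsed = ExampleSpamChecker.parse_config(raw_config)  # returned unchanged here
checker = ExampleSpamChecker(parsed, FakeModuleApi())
```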
mypy.ini (4 changes)
@@ -69,7 +69,6 @@ files =
  synapse/util/async_helpers.py,
  synapse/util/caches,
  synapse/util/metrics.py,
  synapse/util/macaroons.py,
  synapse/util/stringutils.py,
  tests/replication,
  tests/test_utils,
@@ -117,6 +116,9 @@ ignore_missing_imports = True
[mypy-saml2.*]
ignore_missing_imports = True

[mypy-unpaddedbase64]
ignore_missing_imports = True

[mypy-canonicaljson]
ignore_missing_imports = True
scripts-dev/config-lint.sh
@@ -2,14 +2,9 @@
# Find linting errors in Synapse's default config file.
# Exits with 0 if there are no problems, or another code otherwise.

# cd to the root of the repository
cd `dirname $0`/..

# Restore backup of sample config upon script exit
trap "mv docs/sample_config.yaml.bak docs/sample_config.yaml" EXIT

# Fix non-lowercase true/false values
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
rm docs/sample_config.yaml.bak

# Check if anything changed
diff docs/sample_config.yaml docs/sample_config.yaml.bak
git diff --exit-code docs/sample_config.yaml
scripts/synapse_port_db
@@ -47,7 +47,6 @@ from synapse.storage.databases.main.events_bg_updates import (
from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
@@ -178,7 +177,6 @@ class Store(
    UserDirectoryBackgroundUpdateStore,
    EndToEndKeyBackgroundStore,
    StatsStore,
    PusherWorkerStore,
):
    def execute(self, f, *args, **kwargs):
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
setup.cfg
@@ -3,7 +3,6 @@ test_suite = tests

[check-manifest]
ignore =
    .git-blame-ignore-revs
    contrib
    contrib/*
    docs/*
setup.py (2 changes)
@@ -102,7 +102,7 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
    "flake8",
]

CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.11"]
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
stubs/txredisapi.pyi
@@ -17,9 +17,7 @@
"""
from typing import Any, List, Optional, Type, Union

from twisted.internet import protocol

class RedisProtocol(protocol.Protocol):
class RedisProtocol:
    def publish(self, channel: str, message: bytes): ...
    async def ping(self) -> None: ...
    async def set(
@@ -54,7 +52,7 @@ def lazyConnection(

class ConnectionHandler: ...

class RedisFactory(protocol.ReconnectingClientFactory):
class RedisFactory:
    continueTrying: bool
    handler: RedisProtocol
    pool: List[RedisProtocol]
synapse/__init__.py
@@ -48,7 +48,7 @@ try:
except ImportError:
    pass

__version__ = "1.30.0"
__version__ = "1.28.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
synapse/api/auth.py
@@ -39,7 +39,6 @@ from synapse.logging import opentracing as opentracing
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import StateMap, UserID
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)
@@ -164,7 +163,7 @@ class Auth:

    async def get_user_by_req(
        self,
        request: SynapseRequest,
        request: Request,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
@@ -409,7 +408,7 @@ class Auth:
            raise _InvalidMacaroonException()

        try:
            user_id = get_value_from_macaroon(macaroon, "user_id")
            user_id = self.get_user_id_from_macaroon(macaroon)

            guest = False
            for caveat in macaroon.caveats:
@@ -417,12 +416,7 @@
                    guest = True

            self.validate_macaroon(macaroon, rights, user_id=user_id)
        except (
            pymacaroons.exceptions.MacaroonException,
            KeyError,
            TypeError,
            ValueError,
        ):
        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
            raise InvalidClientTokenError("Invalid macaroon passed.")

        if rights == "access":
@@ -430,6 +424,27 @@

        return user_id, guest

    def get_user_id_from_macaroon(self, macaroon):
        """Retrieve the user_id given by the caveats on the macaroon.

        Does *not* validate the macaroon.

        Args:
            macaroon (pymacaroons.Macaroon): The macaroon to validate

        Returns:
            (str) user id

        Raises:
            InvalidClientCredentialsError if there is no user_id caveat in the
            macaroon
        """
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix) :]
        raise InvalidClientTokenError("No user caveat in macaroon")

    def validate_macaroon(self, macaroon, type_string, user_id):
        """
        validate that a Macaroon is understood by and was signed by this server.
@@ -450,13 +465,21 @@
        v.satisfy_exact("type = " + type_string)
        v.satisfy_exact("user_id = %s" % user_id)
        v.satisfy_exact("guest = true")
        satisfy_expiry(v, self.clock.time_msec)
        v.satisfy_general(self._verify_expiry)

        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))

        v.verify(macaroon, self._macaroon_secret_key)

    def _verify_expiry(self, caveat):
        prefix = "time < "
        if not caveat.startswith(prefix):
            return False
        expiry = int(caveat[len(prefix) :])
        now = self.hs.get_clock().time_msec()
        return now < expiry

    def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
        token = self.get_access_token_from_request(request)
        service = self.store.get_app_service_by_token(token)
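The caveat scheme that `validate_macaroon` and `_verify_expiry` enforce above can be reproduced with `pymacaroons` directly. A hedged sketch of minting and verifying a token with the same style of first-party caveats (the secret key and user ID are placeholders, and the exact caveat set here is an assumption rather than Synapse's definitive list):

```python
import time

import pymacaroons

key = "macaroon-secret-key"  # placeholder for the homeserver's secret key
now_ms = int(time.time() * 1000)

# Mint a macaroon carrying "key = value" first-party caveats.
m = pymacaroons.Macaroon(location="example.com", identifier="key", key=key)
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @alice:example.com")
m.add_first_party_caveat("time < %d" % (now_ms + 60_000))  # expires in 60s

# Verify: exact caveats must match; general ones run a predicate per caveat.
v = pymacaroons.Verifier()
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.com")
# Mirrors _verify_expiry above: accept only unexpired "time <" caveats.
v.satisfy_general(
    lambda c: c.startswith("time < ")
    and int(time.time() * 1000) < int(c[len("time < ") :])
)
v.verify(m, key)  # raises a MacaroonException subclass on failure
```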
synapse/app/__init__.py
@@ -17,6 +17,8 @@ import sys

from synapse import python_dependencies  # noqa: E402

sys.dont_write_bytecode = True

logger = logging.getLogger(__name__)

try:
@@ -23,7 +23,6 @@ from typing_extensions import ContextManager
|
||||
|
||||
from twisted.internet import address
|
||||
from twisted.web.resource import IResource
|
||||
from twisted.web.server import Request
|
||||
|
||||
import synapse
|
||||
import synapse.events
|
||||
@@ -191,7 +190,7 @@ class KeyUploadServlet(RestServlet):
|
||||
self.http_client = hs.get_simple_http_client()
|
||||
self.main_uri = hs.config.worker_main_http_uri
|
||||
|
||||
async def on_POST(self, request: Request, device_id: Optional[str]):
|
||||
async def on_POST(self, request, device_id):
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
user_id = requester.user.to_string()
|
||||
body = parse_json_object_from_request(request)
|
||||
@@ -224,12 +223,10 @@ class KeyUploadServlet(RestServlet):
|
||||
header: request.requestHeaders.getRawHeaders(header, [])
|
||||
for header in (b"Authorization", b"User-Agent")
|
||||
}
|
||||
# Add the previous hop to the X-Forwarded-For header.
|
||||
# Add the previous hop the the X-Forwarded-For header.
|
||||
x_forwarded_for = request.requestHeaders.getRawHeaders(
|
||||
b"X-Forwarded-For", []
|
||||
)
|
||||
# we use request.client here, since we want the previous hop, not the
|
||||
# original client (as returned by request.getClientAddress()).
|
||||
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
|
||||
previous_host = request.client.host.encode("ascii")
|
||||
# If the header exists, add to the comma-separated list of the first
|
||||
@@ -242,14 +239,6 @@ class KeyUploadServlet(RestServlet):
|
||||
x_forwarded_for = [previous_host]
|
||||
headers[b"X-Forwarded-For"] = x_forwarded_for
|
||||
|
||||
# Replicate the original X-Forwarded-Proto header. Note that
|
||||
# XForwardedForRequest overrides isSecure() to give us the original protocol
|
||||
# used by the client, as opposed to the protocol used by our upstream proxy
|
||||
# - which is what we want here.
|
||||
headers[b"X-Forwarded-Proto"] = [
|
||||
b"https" if request.isSecure() else b"http"
|
||||
]
|
||||
|
||||
try:
|
||||
result = await self.http_client.post_json_get_json(
|
||||
self.main_uri + request.uri.decode("ascii"), body, headers=headers
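
The header juggling above forwards a client's key upload from a worker to the main process while preserving the original client's identity. A standalone sketch of just the X-Forwarded-For chaining step (a hypothetical helper, not the servlet itself):

def extend_x_forwarded_for(existing: list, previous_host: bytes) -> list:
    # If the header already exists, append this hop to the comma-separated
    # list on the first header value; otherwise start a new chain.
    if existing:
        return [existing[0] + b", " + previous_host] + existing[1:]
    return [previous_host]

assert extend_x_forwarded_for([], b"10.0.0.2") == [b"10.0.0.2"]
assert extend_x_forwarded_for([b"1.2.3.4"], b"10.0.0.2") == [b"1.2.3.4, 10.0.0.2"]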

@@ -90,7 +90,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
            hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )  # type: ResponseCache[Tuple[str, str]]

    async def query_user(self, service, user_id):

@@ -212,8 +212,9 @@ class Config:

    @classmethod
    def read_file(cls, file_path, config_name):
        """Deprecated: call read_file directly"""
        return read_file(file_path, (config_name,))
        cls.check_file(file_path, config_name)
        with open(file_path) as file_stream:
            return file_stream.read()

    def read_template(self, filename: str) -> jinja2.Template:
        """Load a template file from disk.
@@ -893,35 +894,4 @@ class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
        return self._get_instance(key)


def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.
    Returns:
        content of the file.
    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        raise ConfigError("%r is not a string", config_path)

    try:
        os.stat(file_path)
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e


__all__ = [
    "Config",
    "RootConfig",
    "ShardedWorkerHandlingConfig",
    "RoutableShardedWorkerHandlingConfig",
    "read_file",
]
__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]

@@ -152,5 +152,3 @@ class ShardedWorkerHandlingConfig:

class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    def get_instance(self, key: str) -> str: ...

def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

@@ -21,10 +21,8 @@ import threading
from string import Template

import yaml
from zope.interface import implementer

from twisted.logger import (
    ILogObserver,
    LogBeginner,
    STDLibLogObserver,
    eventAsText,
@@ -229,8 +227,7 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->

    threadlocal = threading.local()

    @implementer(ILogObserver)
    def _log(event: dict) -> None:
    def _log(event):
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

@@ -15,7 +15,7 @@
# limitations under the License.

from collections import Counter
from typing import Iterable, Mapping, Optional, Tuple, Type
from typing import Iterable, Optional, Tuple, Type

import attr

@@ -25,7 +25,7 @@ from synapse.types import Collection, JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri

from ._base import Config, ConfigError, read_file
from ._base import Config, ConfigError

DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"

@@ -97,26 +97,7 @@ class OIDCConfig(Config):
        #
        #   client_id: Required. oauth2 client id to use.
        #
        #   client_secret: oauth2 client secret to use. May be omitted if
        #       client_secret_jwt_key is given, or if client_auth_method is 'none'.
        #
        #   client_secret_jwt_key: Alternative to client_secret: details of a key used
        #       to create a JSON Web Token to be used as an OAuth2 client secret. If
        #       given, must be a dictionary with the following properties:
        #
        #       key: a pem-encoded signing key. Must be a suitable key for the
        #           algorithm specified. Required unless 'key_file' is given.
        #
        #       key_file: the path to file containing a pem-encoded signing key file.
        #           Required unless 'key' is given.
        #
        #       jwt_header: a dictionary giving properties to include in the JWT
        #           header. Must include the key 'alg', giving the algorithm used to
        #           sign the JWT, such as "ES256", using the JWA identifiers in
        #           RFC7518.
        #
        #       jwt_payload: an optional dictionary giving properties to include in
        #           the JWT payload. Normally this should include an 'iss' key.
        #   client_secret: Required. oauth2 client secret to use.
        #
        #   client_auth_method: auth method to use when exchanging the token. Valid
        #       values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -237,7 +218,7 @@ class OIDCConfig(Config):
        #
        #- idp_id: github
        #  idp_name: Github
        #  idp_brand: github
        #  idp_brand: org.matrix.github
        #  discover: false
        #  issuer: "https://github.com/"
        #  client_id: "your-client-id" # TO BE FILLED
@@ -259,7 +240,7 @@ class OIDCConfig(Config):
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
    "type": "object",
    "required": ["issuer", "client_id"],
    "required": ["issuer", "client_id", "client_secret"],
    "properties": {
        "idp_id": {
            "type": "string",
@@ -272,12 +253,7 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "idp_icon": {"type": "string"},
        "idp_brand": {
            "type": "string",
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
        },
        "idp_unstable_brand": {
            "type": "string",
            # MSC2758-style namespaced identifier
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
@@ -286,30 +262,6 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "issuer": {"type": "string"},
        "client_id": {"type": "string"},
        "client_secret": {"type": "string"},
        "client_secret_jwt_key": {
            "type": "object",
            "required": ["jwt_header"],
            "oneOf": [
                {"required": ["key"]},
                {"required": ["key_file"]},
            ],
            "properties": {
                "key": {"type": "string"},
                "key_file": {"type": "string"},
                "jwt_header": {
                    "type": "object",
                    "required": ["alg"],
                    "properties": {
                        "alg": {"type": "string"},
                    },
                    "additionalProperties": {"type": "string"},
                },
                "jwt_payload": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
        },
        "client_auth_method": {
            "type": "string",
            # the following list is the same as the keys of
@@ -452,31 +404,15 @@ def _parse_oidc_config_dict(
            "idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
        ) from e

    client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
    client_secret_jwt_key = None  # type: Optional[OidcProviderClientSecretJwtKey]
    if client_secret_jwt_key_config is not None:
        keyfile = client_secret_jwt_key_config.get("key_file")
        if keyfile:
            key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
        else:
            key = client_secret_jwt_key_config["key"]
        client_secret_jwt_key = OidcProviderClientSecretJwtKey(
            key=key,
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )

    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),
        idp_icon=idp_icon,
        idp_brand=oidc_config.get("idp_brand"),
        unstable_idp_brand=oidc_config.get("unstable_idp_brand"),
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
        client_secret=oidc_config.get("client_secret"),
        client_secret_jwt_key=client_secret_jwt_key,
        client_secret=oidc_config["client_secret"],
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        scopes=oidc_config.get("scopes", ["openid"]),
        authorization_endpoint=oidc_config.get("authorization_endpoint"),
@@ -491,18 +427,6 @@ def _parse_oidc_config_dict(
    )
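
The parsing above turns a client_secret_jwt_key block into an OidcProviderClientSecretJwtKey. One way such a key ends up being used, sketched here with PyJWT rather than whatever JWT library the homeserver actually uses (the key, claims and lifetime below are placeholders):

import time
import jwt  # PyJWT

def make_client_secret(key_pem: str, jwt_header: dict, jwt_payload: dict, client_id: str) -> str:
    # 'alg' is required by the schema above; remaining header keys pass through
    alg = jwt_header["alg"]
    headers = {k: v for k, v in jwt_header.items() if k != "alg"}
    payload = dict(jwt_payload)
    payload.setdefault("sub", client_id)  # hypothetical default claims
    payload.setdefault("exp", int(time.time()) + 300)
    return jwt.encode(payload, key_pem, algorithm=alg, headers=headers)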


@attr.s(slots=True, frozen=True)
class OidcProviderClientSecretJwtKey:
    # a pem-encoded signing key
    key = attr.ib(type=str)

    # properties to include in the JWT header
    jwt_header = attr.ib(type=Mapping[str, str])

    # properties to include in the JWT payload.
    jwt_payload = attr.ib(type=Mapping[str, str])


@attr.s(slots=True, frozen=True)
class OidcProviderConfig:
    # a unique identifier for this identity provider. Used in the 'user_external_ids'
@@ -518,9 +442,6 @@ class OidcProviderConfig:
    # Optional brand identifier for this IdP.
    idp_brand = attr.ib(type=Optional[str])

    # Optional brand identifier for the unstable API (see MSC2858).
    unstable_idp_brand = attr.ib(type=Optional[str])

    # whether the OIDC discovery mechanism is used to discover endpoints
    discover = attr.ib(type=bool)

@@ -531,13 +452,8 @@ class OidcProviderConfig:
    # oauth2 client id to use
    client_id = attr.ib(type=str)

    # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate
    # a secret.
    client_secret = attr.ib(type=Optional[str])

    # key to use to construct a JWT to use as a client secret. May be `None` if
    # `client_secret` is set.
    client_secret_jwt_key = attr.ib(type=Optional[OidcProviderClientSecretJwtKey])
    # oauth2 client secret to use
    client_secret = attr.ib(type=str)

    # auth method to use when exchanging the token.
    # Valid values are 'client_secret_basic', 'client_secret_post' and

@@ -841,7 +841,8 @@ class ServerConfig(Config):
        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, unless allow_profile_lookup_over_federation is set to false.
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

@@ -13,22 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys

from ._base import Config

ROOM_STATS_DISABLED_WARN = """\
WARNING: room/user statistics have been disabled via the stats.enabled
configuration setting. This means that certain features (such as the room
directory) will not operate correctly. Future versions of Synapse may ignore
this setting.

To fix this warning, remove the stats.enabled setting from your configuration
file.
--------------------------------------------------------------------------------"""

logger = logging.getLogger(__name__)


class StatsConfig(Config):
    """Stats Configuration
@@ -40,29 +28,30 @@ class StatsConfig(Config):
    def read_config(self, config, **kwargs):
        self.stats_enabled = True
        self.stats_bucket_size = 86400 * 1000
        self.stats_retention = sys.maxsize
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
            self.stats_bucket_size = self.parse_duration(
                stats_config.get("bucket_size", "1d")
            )
        if not self.stats_enabled:
            logger.warning(ROOM_STATS_DISABLED_WARN)
            self.stats_retention = self.parse_duration(
                stats_config.get("retention", "%ds" % (sys.maxsize,))
            )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """
        # Settings for local room and user statistics collection. See
        # docs/room_and_user_statistics.md.
        # Local statistics collection. Used in populating the room directory.
        #
        stats:
          # Uncomment the following to disable room and user statistics. Note that doing
          # so may cause certain features (such as the room directory) not to work
          # correctly.
          #
          #enabled: false

          # The size of each timeslice in the room_stats_historical and
          # user_stats_historical tables, as a time period. Defaults to "1d".
          #
          #bucket_size: 1h
        # 'bucket_size' controls how large each statistics timeslice is. It can
        # be defined in a human readable short form -- e.g. "1d", "1y".
        #
        # 'retention' controls how long historical statistics will be kept for.
        # It can be defined in a human readable short form -- e.g. "1d", "1y".
        #
        #
        #stats:
        #   enabled: true
        #   bucket_size: 1d
        #   retention: 1y
        """

@@ -15,7 +15,6 @@
# limitations under the License.

import inspect
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from synapse.rest.media.v1._base import FileInfo
@@ -28,8 +27,6 @@ if TYPE_CHECKING:
    import synapse.events
    import synapse.server

logger = logging.getLogger(__name__)


class SpamChecker:
    def __init__(self, hs: "synapse.server.HomeServer"):
@@ -193,7 +190,6 @@ class SpamChecker:
        email_threepid: Optional[dict],
        username: Optional[str],
        request_info: Collection[Tuple[str, str]],
        auth_provider_id: Optional[str] = None,
    ) -> RegistrationBehaviour:
        """Checks if we should allow the given registration request.

@@ -202,9 +198,6 @@ class SpamChecker:
            username: The request user name, if any
            request_info: List of tuples of user agent and IP that
                were used during the registration process.
            auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml",
                "cas". If any. Note this does not include users registered
                via a password provider.

        Returns:
            Enum for how the request should be handled
@@ -215,25 +208,9 @@ class SpamChecker:
            # spam checker
            checker = getattr(spam_checker, "check_registration_for_spam", None)
            if checker:
                # Provide auth_provider_id if the function supports it
                checker_args = inspect.signature(checker)
                if len(checker_args.parameters) == 4:
                    d = checker(
                        email_threepid,
                        username,
                        request_info,
                        auth_provider_id,
                    )
                elif len(checker_args.parameters) == 3:
                    d = checker(email_threepid, username, request_info)
                else:
                    logger.error(
                        "Invalid signature for %s.check_registration_for_spam. Denying registration",
                        spam_checker.__module__,
                    )
                    return RegistrationBehaviour.DENY

                behaviour = await maybe_awaitable(d)
                behaviour = await maybe_awaitable(
                    checker(email_threepid, username, request_info)
                )
                assert isinstance(behaviour, RegistrationBehaviour)
                if behaviour != RegistrationBehaviour.ALLOW:
                    return behaviour
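
The inspect.signature branch above keeps backwards compatibility with spam checkers written before auth_provider_id existed. The same dispatch, reduced to a standalone sketch:

import inspect

def call_checker(checker, email_threepid, username, request_info, auth_provider_id):
    # pass the newer argument only to callbacks that declare four parameters
    n_params = len(inspect.signature(checker).parameters)
    if n_params == 4:
        return checker(email_threepid, username, request_info, auth_provider_id)
    if n_params == 3:
        return checker(email_threepid, username, request_info)
    raise TypeError("unexpected signature for %r" % (checker,))

old_style = lambda threepid, username, info: "ALLOW"
new_style = lambda threepid, username, info, idp: "ALLOW" if idp else "DENY"
assert call_checker(old_style, None, "bob", [], "oidc") == "ALLOW"
assert call_checker(new_style, None, "bob", [], "oidc") == "ALLOW"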

@@ -22,7 +22,6 @@ from typing import (
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
@@ -91,15 +90,16 @@ pdu_process_time = Histogram(
    "Time taken to process an event",
)

last_pdu_ts_metric = Gauge(
    "synapse_federation_last_received_pdu_time",
    "The timestamp of the last PDU which was successfully received from the given domain",

last_pdu_age_metric = Gauge(
    "synapse_federation_last_received_pdu_age",
    "The age (in seconds) of the last PDU successfully received from the given domain",
    labelnames=("server_name",),
)


class FederationServer(FederationBase):
    def __init__(self, hs: "HomeServer"):
    def __init__(self, hs):
        super().__init__(hs)

        self.auth = hs.get_auth()
@@ -112,15 +112,14 @@ class FederationServer(FederationBase):
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._server_linearizer = Linearizer("fed_server")
        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        # origins that we are currently processing a transaction from.
        # a dict from origin to txn id.
        self._active_transactions = {}  # type: Dict[str, str]
        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs.get_clock(), "fed_txn_handler", timeout_ms=30000
            hs, "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)
@@ -130,10 +129,10 @@ class FederationServer(FederationBase):
        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs.get_clock(), "state_resp", timeout_ms=30000
            hs, "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs.get_clock(), "state_ids_resp", timeout_ms=30000
            hs, "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
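
Several hunks in this compare change the ResponseCache constructor from taking a clock to taking the homeserver. The caching idea itself is request de-duplication; a hypothetical standalone version, without the real class's timeout handling:

import asyncio

class MiniResponseCache:
    """Callers with the same key share one in-flight coroutine's result."""

    def __init__(self):
        self._pending: dict = {}

    async def wrap(self, key, func, *args):
        if key not in self._pending:
            self._pending[key] = asyncio.ensure_future(func(*args))
        try:
            # shield so one caller's cancellation doesn't kill the shared task
            return await asyncio.shield(self._pending[key])
        finally:
            task = self._pending.get(key)
            if task is not None and task.done():
                self._pending.pop(key, None)

async def main():
    cache = MiniResponseCache()
    calls = 0

    async def handle(txn_id):
        nonlocal calls
        calls += 1
        await asyncio.sleep(0.01)
        return {"txn": txn_id}

    results = await asyncio.gather(
        *(cache.wrap(("origin", "txn1"), handle, "txn1") for _ in range(3))
    )
    assert calls == 1 and all(r == {"txn": "txn1"} for r in results)

asyncio.run(main())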

        self._federation_metrics_domains = (
@@ -170,33 +169,6 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Got transaction", transaction_id)

        # Reject malformed transactions early: reject if too many PDUs/EDUs
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")
            return 400, {}

        # we only process one transaction from each origin at a time. We need to do
        # this check here, rather than in _on_incoming_transaction_inner so that we
        # don't cache the rejection in _transaction_resp_cache (so that if the txn
        # arrives again later, we can process it).
        current_transaction = self._active_transactions.get(origin)
        if current_transaction and current_transaction != transaction_id:
            logger.warning(
                "Received another txn %s from %s while still processing %s",
                transaction_id,
                origin,
                current_transaction,
            )
            return 429, {
                "errcode": Codes.UNKNOWN,
                "error": "Too many concurrent transactions",
            }

        # CRITICAL SECTION: we must now not await until we populate _active_transactions
        # in _on_incoming_transaction_inner.

        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
@@ -210,18 +182,26 @@ class FederationServer(FederationBase):
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        # CRITICAL SECTION: the first thing we must do (before awaiting) is
        # add an entry to _active_transactions.
        assert origin not in self._active_transactions
        self._active_transactions[origin] = transaction.transaction_id  # type: ignore
        # Use a linearizer to ensure that transactions from a remote are
        # processed in order.
        with await self._transaction_linearizer.queue(origin):
            # We rate limit here *after* we've queued up the incoming requests,
            # so that we don't fill up the ratelimiter with blocked requests.
            #
            # This is important as the ratelimiter allows N concurrent requests
            # at a time, and only starts ratelimiting if there are more requests
            # than that being processed at a time. If we queued up requests in
            # the linearizer/response cache *after* the ratelimiting then those
            # queued up requests would count as part of the allowed limit of N
            # concurrent requests.
            with self._federation_ratelimiter.ratelimit(origin) as d:
                await d

        try:
            result = await self._handle_incoming_transaction(
                origin, transaction, request_time
            )
            return result
        finally:
            del self._active_transactions[origin]
                result = await self._handle_incoming_transaction(
                    origin, transaction, request_time
                )

        return result
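
The inner handler above trades the old linearizer ordering for an explicit _active_transactions guard. The ordering guarantee the linearizer gave can be approximated with one asyncio.Lock per origin (a sketch, not Synapse's Linearizer):

import asyncio
from collections import defaultdict

class TransactionSerializer:
    def __init__(self):
        self._locks = defaultdict(asyncio.Lock)

    async def handle(self, origin: str, txn_id: str, process) -> None:
        # transactions from one origin run strictly one at a time, in order
        async with self._locks[origin]:
            await process(origin, txn_id)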

    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
@@ -247,6 +227,19 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):

            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
@@ -342,48 +335,42 @@ class FederationServer(FederationBase):
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            with nested_logging_context(room_id):
                logger.debug("Processing PDUs for %s", room_id)

                try:
                    await self.check_server_matches_acl(origin_host, room_id)
                except AuthError as e:
                    logger.warning(
                        "Ignoring PDUs for room %s from banned server", room_id
                    )
                    for pdu in pdus_by_room[room_id]:
                        event_id = pdu.event_id
                        pdu_results[event_id] = e.error_dict()
                    return

            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    pdu_results[pdu.event_id] = await process_pdu(pdu)
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

        async def process_pdu(pdu: EventBase) -> JsonDict:
            event_id = pdu.event_id
            with pdu_process_time.time():
                with nested_logging_context(event_id):
                    try:
                        await self._handle_received_pdu(origin, pdu)
                        return {}
                    except FederationError as e:
                        logger.warning("Error handling PDU %s: %s", event_id, e)
                        return {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                        )
                        return {"error": str(e)}
            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
            last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)

        return pdu_results
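
concurrently_execute fans the per-room work out with a concurrency cap. Its contract can be sketched with a semaphore (the real utility is implemented differently but behaves similarly):

import asyncio

async def concurrently_execute(func, args, limit: int) -> None:
    # bounded fan-out: at most `limit` coroutines in flight at once
    sem = asyncio.Semaphore(limit)

    async def _run(arg):
        async with sem:
            await func(arg)

    await asyncio.gather(*(_run(a) for a in args))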

@@ -461,22 +448,18 @@ class FederationServer(FederationBase):

    async def _on_state_ids_request_compute(self, room_id, event_id):
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids)
        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        if event_id:
            pdus = await self.handler.get_state_for_pdu(
                room_id, event_id
            )  # type: Iterable[EventBase]
            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
        else:
            pdus = (await self.state.get_current_state(room_id)).values()

        auth_chain = await self.store.get_auth_chain(
            room_id, [pdu.event_id for pdu in pdus]
        )
        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
@@ -880,9 +863,7 @@ class FederationHandlerRegistry:
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = (
            {}
        )  # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]

        # Map from type to instance names that we should route EDU handling to.
        # We randomly choose one instance from the list to route to for each new
@@ -916,7 +897,7 @@ class FederationHandlerRegistry:
        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.
@@ -955,6 +936,10 @@ class FederationHandlerRegistry:
        ):
            return

        # Temporary patch to drop cross-user key share requests
        if edu_type == "m.room_key_request":
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
@@ -989,7 +974,7 @@ class FederationHandlerRegistry:
            # Oh well, let's just log and move on.
            logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict) -> JsonDict:
    async def on_query(self, query_type: str, args: dict):
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)
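
The registry above is a plain type-to-handler map, with one EDU type deliberately blackholed by the temporary patch. A reduced sketch of that dispatch (standalone, hypothetical names):

import logging

logger = logging.getLogger(__name__)

class MiniRegistry:
    DROPPED_TYPES = {"m.room_key_request"}  # temporarily blackholed, as in the diff

    def __init__(self):
        self.edu_handlers = {}

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
        self.edu_handlers[edu_type] = handler

    async def on_edu(self, edu_type, origin, content):
        if edu_type in self.DROPPED_TYPES:
            return
        handler = self.edu_handlers.get(edu_type)
        if handler is None:
            logger.warning("No handler registered for EDU type %s", edu_type)
            return
        await handler(origin, content)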

@@ -17,7 +17,6 @@ import datetime
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, cast

import attr
from prometheus_client import Counter

from synapse.api.errors import (
@@ -94,10 +93,6 @@ class PerDestinationQueue:
        self._destination = destination
        self.transmission_loop_running = False

        # Flag to signal to any running transmission loop that there is new data
        # queued up to be sent.
        self._new_data_to_send = False

        # True whilst we are sending events that the remote homeserver missed
        # because it was unreachable. We start in this state so we can perform
        # catch-up at startup.
@@ -113,7 +108,7 @@ class PerDestinationQueue:
        # destination (we are the only updater so this is safe)
        self._last_successful_stream_ordering = None  # type: Optional[int]

        # a queue of pending PDUs
        # a list of pending PDUs
        self._pending_pdus = []  # type: List[EventBase]

        # XXX this is never actually used: see
@@ -213,10 +208,6 @@ class PerDestinationQueue:
        transaction in the background.
        """

        # Mark that we (may) have new things to send, so that any running
        # transmission loop will recheck whether there is stuff to send.
        self._new_data_to_send = True

        if self.transmission_loop_running:
            # XXX: this can get stuck on by a never-ending
            # request at which point pending_pdus just keeps growing.
@@ -259,41 +250,125 @@ class PerDestinationQueue:

            pending_pdus = []
            while True:
                self._new_data_to_send = False
                # We have to keep 2 free slots for presence and rr_edus
                limit = MAX_EDUS_PER_TRANSACTION - 2

                async with _TransactionQueueManager(self) as (
                    pending_pdus,
                    pending_edus,
                ):
                    if not pending_pdus and not pending_edus:
                        logger.debug("TX [%s] Nothing to send", self._destination)
                device_update_edus, dev_list_id = await self._get_device_update_edus(
                    limit
                )

                        # If we've gotten told about new things to send during
                        # checking for things to send, we try looking again.
                        # Otherwise new PDUs or EDUs might arrive in the meantime,
                        # but not get sent because we hold the
                        # `transmission_loop_running` flag.
                        if self._new_data_to_send:
                            continue
                        else:
                            return
                limit -= len(device_update_edus)

                    if pending_pdus:
                        logger.debug(
                            "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                            self._destination,
                            len(pending_pdus),
                (
                    to_device_edus,
                    device_stream_id,
                ) = await self._get_to_device_message_edus(limit)

                pending_edus = device_update_edus + to_device_edus

                # BEGIN CRITICAL SECTION
                #
                # In order to avoid a race condition, we need to make sure that
                # the following code (from popping the queues up to the point
                # where we decide if we actually have any pending messages) is
                # atomic - otherwise new PDUs or EDUs might arrive in the
                # meantime, but not get sent because we hold the
                # transmission_loop_running flag.

                pending_pdus = self._pending_pdus

                # We can only include at most 50 PDUs per transactions
                pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]

                pending_edus.extend(self._get_rr_edus(force_flush=False))
                pending_presence = self._pending_presence
                self._pending_presence = {}
                if pending_presence:
                    pending_edus.append(
                        Edu(
                            origin=self._server_name,
                            destination=self._destination,
                            edu_type="m.presence",
                            content={
                                "push": [
                                    format_user_presence_state(
                                        presence, self._clock.time_msec()
                                    )
                                    for presence in pending_presence.values()
                                ]
                            },
                        )

                    await self._transaction_manager.send_new_transaction(
                        self._destination, pending_pdus, pending_edus
                    )

                pending_edus.extend(
                    self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
                )
                while (
                    len(pending_edus) < MAX_EDUS_PER_TRANSACTION
                    and self._pending_edus_keyed
                ):
                    _, val = self._pending_edus_keyed.popitem()
                    pending_edus.append(val)

                if pending_pdus:
                    logger.debug(
                        "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                        self._destination,
                        len(pending_pdus),
                    )

                if not pending_pdus and not pending_edus:
                    logger.debug("TX [%s] Nothing to send", self._destination)
                    self._last_device_stream_id = device_stream_id
                    return

                # if we've decided to send a transaction anyway, and we have room, we
                # may as well send any pending RRs
                if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
                    pending_edus.extend(self._get_rr_edus(force_flush=True))

                # END CRITICAL SECTION

                success = await self._transaction_manager.send_new_transaction(
                    self._destination, pending_pdus, pending_edus
                )
                if success:
                    sent_transactions_counter.inc()
                    sent_edus_counter.inc(len(pending_edus))
                    for edu in pending_edus:
                        sent_edus_by_type.labels(edu.edu_type).inc()
                    # Remove the acknowledged device messages from the database
                    # Only bother if we actually sent some device messages
                    if to_device_edus:
                        await self._store.delete_device_msgs_for_remote(
                            self._destination, device_stream_id
                        )

                    # also mark the device updates as sent
                    if device_update_edus:
                        logger.info(
                            "Marking as sent %r %r", self._destination, dev_list_id
                        )
                        await self._store.mark_as_sent_devices_by_remote(
                            self._destination, dev_list_id
                        )

                    self._last_device_stream_id = device_stream_id
                    self._last_device_list_stream_id = dev_list_id

                    if pending_pdus:
                        # we sent some PDUs and it was successful, so update our
                        # last_successful_stream_ordering in the destinations table.
                        final_pdu = pending_pdus[-1]
                        last_successful_stream_ordering = (
                            final_pdu.internal_metadata.stream_ordering
                        )
                        assert last_successful_stream_ordering
                        await self._store.set_destination_last_successful_stream_ordering(
                            self._destination, last_successful_stream_ordering
                        )
                else:
                    break
        except NotRetryingDestination as e:
            logger.debug(
                "TX [%s] not ready for retry yet (next retry at %s) - "
@@ -326,7 +401,7 @@ class PerDestinationQueue:
            self._pending_presence = {}
            self._pending_rrs = {}

            self._start_catching_up()
            self._start_catching_up()
        except FederationDeniedError as e:
            logger.info(e)
        except HttpResponseException as e:
@@ -337,6 +412,7 @@ class PerDestinationQueue:
                e,
            )

            self._start_catching_up()
        except RequestSendFailed as e:
            logger.warning(
                "TX [%s] Failed to send transaction: %s", self._destination, e
@@ -346,12 +422,16 @@ class PerDestinationQueue:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )

                self._start_catching_up()
        except Exception:
            logger.exception("TX [%s] Failed to send transaction", self._destination)
            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )

            self._start_catching_up()
        finally:
            # We want to be *very* sure we clear this after we stop processing
            self.transmission_loop_running = False
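
The loop above must never run twice for one destination, yet must not miss data queued while it is winding down; transmission_loop_running and _new_data_to_send together provide that. The core of the pattern, as a standalone approximation (names and the trivial gather step are hypothetical):

import asyncio

class DestinationQueue:
    def __init__(self):
        self.loop_running = False
        self.new_data = False
        self.pending = []

    def enqueue(self, item) -> None:
        self.pending.append(item)
        self.new_data = True
        # the caller would then kick attempt_transmission in the background

    async def gather_batch(self):
        # stand-in for the real async work of assembling PDUs/EDUs
        await asyncio.sleep(0)
        batch, self.pending = self.pending[:50], self.pending[50:]
        return batch

    async def attempt_transmission(self, send) -> None:
        if self.loop_running:
            return  # the running loop will see new_data and re-check
        self.loop_running = True
        try:
            while True:
                self.new_data = False
                batch = await self.gather_batch()  # new data may arrive here
                if not batch:
                    if self.new_data:
                        continue  # raced with an enqueue: look again
                    return
                await send(batch)
        finally:
            self.loop_running = False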

@@ -419,10 +499,13 @@ class PerDestinationQueue:
        rooms = [p.room_id for p in catchup_pdus]
        logger.info("Catching up rooms to %s: %r", self._destination, rooms)

        await self._transaction_manager.send_new_transaction(
        success = await self._transaction_manager.send_new_transaction(
            self._destination, catchup_pdus, []
        )

        if not success:
            return

        sent_transactions_counter.inc()
        final_pdu = catchup_pdus[-1]
        self._last_successful_stream_ordering = cast(
@@ -501,135 +584,3 @@ class PerDestinationQueue:
        """
        self._catching_up = True
        self._pending_pdus = []


@attr.s(slots=True)
class _TransactionQueueManager:
    """A helper async context manager for pulling stuff off the queues and
    tracking what was last successfully sent, etc.
    """

    queue = attr.ib(type=PerDestinationQueue)

    _device_stream_id = attr.ib(type=Optional[int], default=None)
    _device_list_id = attr.ib(type=Optional[int], default=None)
    _last_stream_ordering = attr.ib(type=Optional[int], default=None)
    _pdus = attr.ib(type=List[EventBase], factory=list)

    async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
        # First we calculate the EDUs we want to send, if any.

        # We start by fetching device related EDUs, i.e device updates and to
        # device messages. We have to keep 2 free slots for presence and rr_edus.
        limit = MAX_EDUS_PER_TRANSACTION - 2

        device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
            limit
        )

        if device_update_edus:
            self._device_list_id = dev_list_id
        else:
            self.queue._last_device_list_stream_id = dev_list_id

        limit -= len(device_update_edus)

        (
            to_device_edus,
            device_stream_id,
        ) = await self.queue._get_to_device_message_edus(limit)

        if to_device_edus:
            self._device_stream_id = device_stream_id
        else:
            self.queue._last_device_stream_id = device_stream_id

        pending_edus = device_update_edus + to_device_edus

        # Now add the read receipt EDU.
        pending_edus.extend(self.queue._get_rr_edus(force_flush=False))

        # And presence EDU.
        if self.queue._pending_presence:
            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type="m.presence",
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                )
            )
            self.queue._pending_presence = {}

        # Finally add any other types of EDUs if there is room.
        pending_edus.extend(
            self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
        )
        while (
            len(pending_edus) < MAX_EDUS_PER_TRANSACTION
            and self.queue._pending_edus_keyed
        ):
            _, val = self.queue._pending_edus_keyed.popitem()
            pending_edus.append(val)

        # Now we look for any PDUs to send, by getting up to 50 PDUs from the
        # queue
        self._pdus = self.queue._pending_pdus[:50]

        if not self._pdus and not pending_edus:
            return [], []

        # if we've decided to send a transaction anyway, and we have room, we
        # may as well send any pending RRs
        if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
            pending_edus.extend(self.queue._get_rr_edus(force_flush=True))

        if self._pdus:
            self._last_stream_ordering = self._pdus[
                -1
            ].internal_metadata.stream_ordering
            assert self._last_stream_ordering

        return self._pdus, pending_edus

    async def __aexit__(self, exc_type, exc, tb):
        if exc_type is not None:
            # Failed to send transaction, so we bail out.
            return

        # Successfully sent transactions, so we remove pending PDUs from the queue
        if self._pdus:
            self.queue._pending_pdus = self.queue._pending_pdus[len(self._pdus) :]

        # Succeeded to send the transaction so we record where we have sent up
        # to in the various streams

        if self._device_stream_id:
            await self.queue._store.delete_device_msgs_for_remote(
                self.queue._destination, self._device_stream_id
            )
            self.queue._last_device_stream_id = self._device_stream_id

        # also mark the device updates as sent
        if self._device_list_id:
            logger.info(
                "Marking as sent %r %r", self.queue._destination, self._device_list_id
            )
            await self.queue._store.mark_as_sent_devices_by_remote(
                self.queue._destination, self._device_list_id
            )
            self.queue._last_device_list_stream_id = self._device_list_id

        if self._last_stream_ordering:
            # we sent some PDUs and it was successful, so update our
            # last_successful_stream_ordering in the destinations table.
            await self.queue._store.set_destination_last_successful_stream_ordering(
                self.queue._destination, self._last_stream_ordering
            )
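
_TransactionQueueManager peeks at up to 50 queued PDUs in __aenter__ and only commits the dequeue in __aexit__ once the send succeeded. The same peek-then-commit shape in miniature (hypothetical names):

class BatchManager:
    def __init__(self, queue: list):
        self.queue = queue
        self.batch = []

    async def __aenter__(self) -> list:
        self.batch = self.queue[:50]  # peek, don't pop yet
        return self.batch

    async def __aexit__(self, exc_type, exc, tb) -> bool:
        if exc_type is None and self.batch:
            # success: now actually remove what was sent
            del self.queue[: len(self.batch)]
        return False  # never swallow the exception

If the send raises, the items stay on the queue and are retried by the next transmission loop, which is exactly what the diff's catch-up paths rely on.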

@@ -36,9 +36,9 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

last_pdu_ts_metric = Gauge(
    "synapse_federation_last_sent_pdu_time",
    "The timestamp of the last PDU which was successfully sent to the given domain",
last_pdu_age_metric = Gauge(
    "synapse_federation_last_sent_pdu_age",
    "The age (in seconds) of the last PDU successfully sent to the given domain",
    labelnames=("server_name",),
)

@@ -69,12 +69,15 @@ class TransactionManager:
        destination: str,
        pdus: List[EventBase],
        edus: List[Edu],
    ) -> None:
    ) -> bool:
        """
        Args:
            destination: The destination to send to (e.g. 'example.org')
            pdus: In-order list of PDUs to send
            edus: List of EDUs to send

        Returns:
            True iff the transaction was successful
        """

        # Make a transaction-sending opentracing span. This span follows on from
@@ -93,6 +96,8 @@ class TransactionManager:
            edu.strip_context()

        with start_active_span_follows_from("send_transaction", span_contexts):
            success = True

            logger.debug("TX [%s] _attempt_new_transaction", destination)

            txn_id = str(self._next_txn_id)
@@ -147,29 +152,45 @@ class TransactionManager:
                response = await self._transport_layer.send_transaction(
                    transaction, json_data_cb
                )
                code = 200
            except HttpResponseException as e:
                code = e.code
                response = e.response

                set_tag(tags.ERROR, True)
                if e.code in (401, 404, 429) or 500 <= e.code:
                    logger.info(
                        "TX [%s] {%s} got %d response", destination, txn_id, code
                    )
                    raise e

            logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
                    raise
                logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)

            logger.info("TX [%s] {%s} got 200 response", destination, txn_id)

            for e_id, r in response.get("pdus", {}).items():
                if "error" in r:
            if code == 200:
                for e_id, r in response.get("pdus", {}).items():
                    if "error" in r:
                        logger.warning(
                            "TX [%s] {%s} Remote returned error for %s: %s",
                            destination,
                            txn_id,
                            e_id,
                            r,
                        )
            else:
                for p in pdus:
                    logger.warning(
                        "TX [%s] {%s} Remote returned error for %s: %s",
                        "TX [%s] {%s} Failed to send event %s",
                        destination,
                        txn_id,
                        e_id,
                        r,
                        p.event_id,
                    )
                    success = False

            if pdus and destination in self._federation_metrics_domains:
            if success and pdus and destination in self._federation_metrics_domains:
                last_pdu = pdus[-1]
                last_pdu_ts_metric.labels(server_name=destination).set(
                    last_pdu.origin_server_ts / 1000
                last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts
                last_pdu_age_metric.labels(server_name=destination).set(
                    last_pdu_age / 1000
                )

            set_tag(tags.ERROR, not success)
            return success
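
The status-code check above decides which failures abort the transaction and leave retrying to the catch-up machinery. As a predicate:

def should_reraise(code: int) -> bool:
    # mirrors the re-raise condition in the diff: 401, 404, 429 and any
    # 5xx response abandon this attempt rather than marking PDUs as sent
    return code in (401, 404, 429) or 500 <= code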

@@ -73,9 +73,7 @@ class AcmeHandler:
            "Listening for ACME requests on %s:%i", host, self.hs.config.acme_port
        )
        try:
            self.reactor.listenTCP(
                self.hs.config.acme_port, srv, backlog=50, interface=host
            )
            self.reactor.listenTCP(self.hs.config.acme_port, srv, interface=host)
        except twisted.internet.error.CannotListenError as e:
            check_bind_error(e, host, bind_addresses)


@@ -36,7 +36,7 @@ import attr
import bcrypt
import pymacaroons

from twisted.web.server import Request
from twisted.web.http import Request

from synapse.api.constants import LoginType
from synapse.api.errors import (
@@ -65,7 +65,6 @@ from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.threepids import canonicalise_email

@@ -171,16 +170,6 @@ class SsoLoginExtraAttributes:
    extra_attributes = attr.ib(type=JsonDict)


@attr.s(slots=True, frozen=True)
class LoginTokenAttributes:
    """Data we store in a short-term login token"""

    user_id = attr.ib(type=str)

    # the SSO Identity Provider that the user authenticated with, to get this token
    auth_provider_id = attr.ib(type=str)


class AuthHandler(BaseHandler):
    SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000

@@ -337,8 +326,7 @@ class AuthHandler(BaseHandler):
                user is too high to proceed

        """
        if not requester.access_token_id:
            raise ValueError("Cannot validate a user without an access token")

        if self._ui_auth_session_timeout:
            last_validated = await self.store.get_access_token_last_validated(
                requester.access_token_id
@@ -493,7 +481,7 @@ class AuthHandler(BaseHandler):
            sid = authdict["session"]

        # Convert the URI and method to strings.
        uri = request.uri.decode("utf-8")  # type: ignore
        uri = request.uri.decode("utf-8")
        method = request.method.decode("utf-8")

        # If there's no session ID, create a new session.
@@ -1176,16 +1164,18 @@ class AuthHandler(BaseHandler):
            return None
        return user_id

    async def validate_short_term_login_token(
        self, login_token: str
    ) -> LoginTokenAttributes:
    async def validate_short_term_login_token_and_get_user_id(self, login_token: str):
        auth_api = self.hs.get_auth()
        user_id = None
        try:
            res = self.macaroon_gen.verify_short_term_login_token(login_token)
            macaroon = pymacaroons.Macaroon.deserialize(login_token)
            user_id = auth_api.get_user_id_from_macaroon(macaroon)
            auth_api.validate_macaroon(macaroon, "login", user_id)
        except Exception:
            raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)

        await self.auth.check_auth_blocking(res.user_id)
        return res
        await self.auth.check_auth_blocking(user_id)
        return user_id

    async def delete_access_token(self, access_token: str):
        """Invalidate a single access token
@@ -1214,7 +1204,7 @@ class AuthHandler(BaseHandler):
    async def delete_access_tokens_for_user(
        self,
        user_id: str,
        except_token_id: Optional[int] = None,
        except_token_id: Optional[str] = None,
        device_id: Optional[str] = None,
    ):
        """Invalidate access tokens belonging to a user
@@ -1407,7 +1397,6 @@ class AuthHandler(BaseHandler):
    async def complete_sso_login(
        self,
        registered_user_id: str,
        auth_provider_id: str,
        request: Request,
        client_redirect_url: str,
        extra_attributes: Optional[JsonDict] = None,
@@ -1417,9 +1406,6 @@ class AuthHandler(BaseHandler):

        Args:
            registered_user_id: The registered user ID to complete SSO login for.
            auth_provider_id: The id of the SSO Identity provider that was used for
                login. This will be stored in the login token for future tracking in
                prometheus metrics.
            request: The request to complete.
            client_redirect_url: The URL to which to redirect the user at the end of the
                process.
@@ -1441,7 +1427,6 @@ class AuthHandler(BaseHandler):

        self._complete_sso_login(
            registered_user_id,
            auth_provider_id,
            request,
            client_redirect_url,
            extra_attributes,
@@ -1452,7 +1437,6 @@ class AuthHandler(BaseHandler):
    def _complete_sso_login(
        self,
        registered_user_id: str,
        auth_provider_id: str,
        request: Request,
        client_redirect_url: str,
        extra_attributes: Optional[JsonDict] = None,
@@ -1479,7 +1463,7 @@ class AuthHandler(BaseHandler):

        # Create a login token
        login_token = self.macaroon_gen.generate_short_term_login_token(
            registered_user_id, auth_provider_id=auth_provider_id
            registered_user_id
        )

        # Append the login token to the original redirect URL (i.e. with its query
@@ -1585,48 +1569,15 @@ class MacaroonGenerator:
        return macaroon.serialize()

    def generate_short_term_login_token(
        self,
        user_id: str,
        auth_provider_id: str,
        duration_in_ms: int = (2 * 60 * 1000),
        self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
    ) -> str:
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = login")
        now = self.hs.get_clock().time_msec()
        expiry = now + duration_in_ms
        macaroon.add_first_party_caveat("time < %d" % (expiry,))
        macaroon.add_first_party_caveat("auth_provider_id = %s" % (auth_provider_id,))
        return macaroon.serialize()

    def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes:
        """Verify a short-term-login macaroon

        Checks that the given token is a valid, unexpired short-term-login token
        minted by this server.

        Args:
            token: the login token to verify

        Returns:
            the user_id that this token is valid for

        Raises:
            MacaroonVerificationFailedException if the verification failed
        """
        macaroon = pymacaroons.Macaroon.deserialize(token)
        user_id = get_value_from_macaroon(macaroon, "user_id")
        auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id")

        v = pymacaroons.Verifier()
        v.satisfy_exact("gen = 1")
        v.satisfy_exact("type = login")
        v.satisfy_general(lambda c: c.startswith("user_id = "))
        v.satisfy_general(lambda c: c.startswith("auth_provider_id = "))
        satisfy_expiry(v, self.hs.get_clock().time_msec)
        v.verify(macaroon, self.hs.config.key.macaroon_secret_key)

        return LoginTokenAttributes(user_id=user_id, auth_provider_id=auth_provider_id)
|
||||
|
||||
def generate_delete_pusher_token(self, user_id: str) -> str:
|
||||
macaroon = self._generate_base_macaroon(user_id)
|
||||
macaroon.add_first_party_caveat("type = delete_pusher")
|
||||
|
||||
@@ -83,7 +83,6 @@ class CasHandler:
|
||||
# the SsoIdentityProvider protocol type.
|
||||
self.idp_icon = None
|
||||
self.idp_brand = None
|
||||
self.unstable_idp_brand = None
|
||||
|
||||
self._sso_handler = hs.get_sso_handler()
|
||||
|
||||
|
||||
@@ -201,7 +201,7 @@ class FederationHandler(BaseHandler):
|
||||
or pdu.internal_metadata.is_outlier()
|
||||
)
|
||||
if already_seen:
|
||||
logger.debug("Already seen pdu")
|
||||
logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
|
||||
return
|
||||
|
||||
# do some initial sanity-checking of the event. In particular, make
|
||||
@@ -210,14 +210,18 @@ class FederationHandler(BaseHandler):
|
||||
try:
|
||||
self._sanity_check_event(pdu)
|
||||
except SynapseError as err:
|
||||
logger.warning("Received event failed sanity checks")
|
||||
logger.warning(
|
||||
"[%s %s] Received event failed sanity checks", room_id, event_id
|
||||
)
|
||||
raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
|
||||
|
||||
# If we are currently in the process of joining this room, then we
|
||||
# queue up events for later processing.
|
||||
if room_id in self.room_queues:
|
||||
logger.info(
|
||||
"Queuing PDU from %s for now: join in progress",
|
||||
"[%s %s] Queuing PDU from %s for now: join in progress",
|
||||
room_id,
|
||||
event_id,
|
||||
origin,
|
||||
)
|
||||
self.room_queues[room_id].append((pdu, origin))
|
||||
@@ -232,7 +236,9 @@ class FederationHandler(BaseHandler):
is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
if not is_in_room:
logger.info(
"Ignoring PDU from %s as we're not in the room",
"[%s %s] Ignoring PDU from %s as we're not in the room",
room_id,
event_id,
origin,
)
return None

@@ -244,7 +250,7 @@ class FederationHandler(BaseHandler):
# We only backfill backwards to the min depth.
min_depth = await self.get_min_depth_for_context(pdu.room_id)

logger.debug("min_depth: %d", min_depth)
logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth)

prevs = set(pdu.prev_event_ids())
seen = await self.store.have_events_in_timeline(prevs)

@@ -261,13 +267,17 @@ class FederationHandler(BaseHandler):
# If we're missing stuff, ensure we only fetch stuff one
# at a time.
logger.info(
"Acquiring room lock to fetch %d missing prev_events: %s",
"[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
room_id,
event_id,
len(missing_prevs),
shortstr(missing_prevs),
)
with (await self._room_pdu_linearizer.queue(pdu.room_id)):
logger.info(
"Acquired room lock to fetch %d missing prev_events",
"[%s %s] Acquired room lock to fetch %d missing prev_events",
room_id,
event_id,
len(missing_prevs),
)

@@ -287,7 +297,9 @@ class FederationHandler(BaseHandler):

if not prevs - seen:
logger.info(
"Found all missing prev_events",
"[%s %s] Found all missing prev_events",
room_id,
event_id,
)

if prevs - seen:

@@ -317,7 +329,9 @@ class FederationHandler(BaseHandler):

if sent_to_us_directly:
logger.warning(
"Rejecting: failed to fetch %d prev events: %s",
"[%s %s] Rejecting: failed to fetch %d prev events: %s",
room_id,
event_id,
len(prevs - seen),
shortstr(prevs - seen),
)

@@ -353,16 +367,17 @@ class FederationHandler(BaseHandler):
# Ask the remote server for the states we don't
# know about
for p in prevs - seen:
logger.info("Requesting state after missing prev_event %s", p)
logger.info(
"Requesting state at missing prev_event %s",
event_id,
)

with nested_logging_context(p):
# note that if any of the missing prevs share missing state or
# auth events, the requests to fetch those events are deduped
# by the get_pdu_cache in federation_client.
remote_state = (
await self._get_state_after_missing_prev_event(
origin, room_id, p
)
(remote_state, _,) = await self._get_state_for_room(
origin, room_id, p, include_event_in_state=True
)

remote_state_map = {

@@ -399,7 +414,10 @@ class FederationHandler(BaseHandler):
state = [event_map[e] for e in state_map.values()]
except Exception:
logger.warning(
"Error attempting to resolve state at missing " "prev_events",
"[%s %s] Error attempting to resolve state at missing "
"prev_events",
room_id,
event_id,
exc_info=True,
)
raise FederationError(

@@ -436,7 +454,9 @@ class FederationHandler(BaseHandler):
latest |= seen

logger.info(
"Requesting missing events between %s and %s",
"[%s %s]: Requesting missing events between %s and %s",
room_id,
event_id,
shortstr(latest),
event_id,
)

@@ -503,11 +523,15 @@ class FederationHandler(BaseHandler):
# We failed to get the missing events, but since we need to handle
# the case of `get_missing_events` not returning the necessary
# events anyway, it is safe to simply log the error and continue.
logger.warning("Failed to get prev_events: %s", e)
logger.warning(
"[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
)
return

logger.info(
"Got %d prev_events: %s",
"[%s %s]: Got %d prev_events: %s",
room_id,
event_id,
len(missing_events),
shortstr(missing_events),
)

@@ -518,7 +542,9 @@ class FederationHandler(BaseHandler):

for ev in missing_events:
logger.info(
"Handling received prev_event %s",
"[%s %s] Handling received prev_event %s",
room_id,
event_id,
ev.event_id,
)
with nested_logging_context(ev.event_id):

@@ -527,7 +553,9 @@ class FederationHandler(BaseHandler):
except FederationError as e:
if e.code == 403:
logger.warning(
"Received prev_event %s failed history check.",
"[%s %s] Received prev_event %s failed history check.",
room_id,
event_id,
ev.event_id,
)
else:

@@ -538,6 +566,7 @@ class FederationHandler(BaseHandler):
destination: str,
room_id: str,
event_id: str,
include_event_in_state: bool = False,
) -> Tuple[List[EventBase], List[EventBase]]:
"""Requests all of the room state at a given event from a remote homeserver.

@@ -545,9 +574,11 @@ class FederationHandler(BaseHandler):
destination: The remote homeserver to query for the state.
room_id: The id of the room we're interested in.
event_id: The id of the event we want the state at.
include_event_in_state: if true, the event itself will be included in the
returned state event list.

Returns:
A list of events in the state, not including the event itself, and
A list of events in the state, possibly including the event itself, and
a list of events in the auth chain for the given event.
"""
(

@@ -559,6 +590,9 @@ class FederationHandler(BaseHandler):

desired_events = set(state_event_ids + auth_event_ids)

if include_event_in_state:
desired_events.add(event_id)

event_map = await self._get_events_from_store_or_dest(
destination, room_id, desired_events
)

@@ -575,6 +609,13 @@ class FederationHandler(BaseHandler):
event_map[e_id] for e_id in state_event_ids if e_id in event_map
]

if include_event_in_state:
remote_event = event_map.get(event_id)
if not remote_event:
raise Exception("Unable to get missing prev_event %s" % (event_id,))
if remote_event.is_state() and remote_event.rejected_reason is None:
remote_state.append(remote_event)

auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
auth_chain.sort(key=lambda e: e.depth)
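
Per the updated docstring, include_event_in_state means: add the event itself to the fetch set, and append it to the returned state list only if it turns out to be a non-rejected state event. That contract, condensed into a standalone sketch (hypothetical helper name):

    def build_state_list(event_map, state_event_ids, event_id, include_event_in_state):
        # Keep only the state events that were actually fetched.
        remote_state = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]

        if include_event_in_state:
            remote_event = event_map.get(event_id)
            if remote_event is None:
                raise Exception("Unable to get missing prev_event %s" % (event_id,))
            # Only a non-rejected state event belongs in the state list.
            if remote_event.is_state() and remote_event.rejected_reason is None:
                remote_state.append(remote_event)

        return remote_state
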
@@ -648,131 +689,6 @@ class FederationHandler(BaseHandler):

return fetched_events

async def _get_state_after_missing_prev_event(
self,
destination: str,
room_id: str,
event_id: str,
) -> List[EventBase]:
"""Requests all of the room state at a given event from a remote homeserver.

Args:
destination: The remote homeserver to query for the state.
room_id: The id of the room we're interested in.
event_id: The id of the event we want the state at.

Returns:
A list of events in the state, including the event itself
"""
# TODO: This function is basically the same as _get_state_for_room. Can
# we make backfill() use it, rather than having two code paths? I think the
# only difference is that backfill() persists the prev events separately.

(
state_event_ids,
auth_event_ids,
) = await self.federation_client.get_room_state_ids(
destination, room_id, event_id=event_id
)

logger.debug(
"state_ids returned %i state events, %i auth events",
len(state_event_ids),
len(auth_event_ids),
)

# start by just trying to fetch the events from the store
desired_events = set(state_event_ids)
desired_events.add(event_id)
logger.debug("Fetching %i events from cache/store", len(desired_events))
fetched_events = await self.store.get_events(
desired_events, allow_rejected=True
)

missing_desired_events = desired_events - fetched_events.keys()
logger.debug(
"We are missing %i events (got %i)",
len(missing_desired_events),
len(fetched_events),
)

# We probably won't need most of the auth events, so let's just check which
# we have for now, rather than thrashing the event cache with them all
# unnecessarily.

# TODO: we probably won't actually need all of the auth events, since we
# already have a bunch of the state events. It would be nice if the
# federation api gave us a way of finding out which we actually need.

missing_auth_events = set(auth_event_ids) - fetched_events.keys()
missing_auth_events.difference_update(
await self.store.have_seen_events(missing_auth_events)
)
logger.debug("We are also missing %i auth events", len(missing_auth_events))

missing_events = missing_desired_events | missing_auth_events
logger.debug("Fetching %i events from remote", len(missing_events))
await self._get_events_and_persist(
destination=destination, room_id=room_id, events=missing_events
)

# we need to make sure we re-load from the database to get the rejected
# state correct.
fetched_events.update(
(await self.store.get_events(missing_desired_events, allow_rejected=True))
)

# check for events which were in the wrong room.
#
# this can happen if a remote server claims that the state or
# auth_events at an event in room A are actually events in room B

bad_events = [
(event_id, event.room_id)
for event_id, event in fetched_events.items()
if event.room_id != room_id
]

for bad_event_id, bad_room_id in bad_events:
# This is a bogus situation, but since we may only discover it a long time
# after it happened, we try our best to carry on, by just omitting the
# bad events from the returned state set.
logger.warning(
"Remote server %s claims event %s in room %s is an auth/state "
"event in room %s",
destination,
bad_event_id,
bad_room_id,
room_id,
)

del fetched_events[bad_event_id]

# if we couldn't get the prev event in question, that's a problem.
remote_event = fetched_events.get(event_id)
if not remote_event:
raise Exception("Unable to get missing prev_event %s" % (event_id,))

# missing state at that event is a warning, not a blocker
# XXX: this doesn't sound right? it means that we'll end up with incomplete
# state.
failed_to_fetch = desired_events - fetched_events.keys()
if failed_to_fetch:
logger.warning(
"Failed to fetch missing state events for %s %s",
event_id,
failed_to_fetch,
)

remote_state = [
fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events
]

if remote_event.is_state() and remote_event.rejected_reason is None:
remote_state.append(remote_event)

return remote_state

async def _process_received_pdu(
self,
origin: str,

@@ -791,7 +707,10 @@ class FederationHandler(BaseHandler):
(ie, we are missing one or more prev_events), the resolved state at the
event
"""
logger.debug("Processing event: %s", event)
room_id = event.room_id
event_id = event.event_id

logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)

try:
await self._handle_new_event(origin, event, state=state)

@@ -952,6 +871,7 @@ class FederationHandler(BaseHandler):
destination=dest,
room_id=room_id,
event_id=e_id,
include_event_in_state=False,
)
auth_events.update({a.event_id: a for a in auth})
auth_events.update({s.event_id: s for s in state})

@@ -1397,7 +1317,7 @@ class FederationHandler(BaseHandler):
async def on_event_auth(self, event_id: str) -> List[EventBase]:
event = await self.store.get_event(event_id)
auth = await self.store.get_auth_chain(
event.room_id, list(event.auth_event_ids()), include_given=True
list(event.auth_event_ids()), include_given=True
)
return list(auth)

@@ -1660,7 +1580,7 @@ class FederationHandler(BaseHandler):
prev_state_ids = await context.get_prev_state_ids()

state_ids = list(prev_state_ids.values())
auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)
auth_chain = await self.store.get_auth_chain(state_ids)

state = await self.store.get_events(list(prev_state_ids.values()))

@@ -2299,7 +2219,7 @@ class FederationHandler(BaseHandler):

# Now get the current auth_chain for the event.
local_auth_chain = await self.store.get_auth_chain(
room_id, list(event.auth_event_ids()), include_given=True
list(event.auth_event_ids()), include_given=True
)

# TODO: Check if we would now reject event_id. If so we need to tell

@@ -48,7 +48,7 @@ class InitialSyncHandler(BaseHandler):
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = ResponseCache(
hs.get_clock(), "initial_sync_cache"
hs, "initial_sync_cache"
) # type: ResponseCache[Tuple[str, Optional[StreamToken], Optional[StreamToken], str, Optional[int], bool, bool]]
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()

@@ -252,7 +252,7 @@ class MessageHandler:
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
# is a user in the room that the AS is "interested in"
if requester.app_service and user_id not in users_with_profile:
if False and requester.app_service and user_id not in users_with_profile:
for uid in users_with_profile:
if requester.app_service.is_interested_in_user(uid):
break

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2020 Quentin Gliech
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -15,13 +14,13 @@
# limitations under the License.
import inspect
import logging
from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar, Union
from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar
from urllib.parse import urlencode

import attr
import pymacaroons
from authlib.common.security import generate_token
from authlib.jose import JsonWebToken, jwt
from authlib.jose import JsonWebToken
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, ImplicitIDToken, UserInfo

@@ -29,26 +28,20 @@ from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
from jinja2 import Environment, Template
from pymacaroons.exceptions import (
MacaroonDeserializationException,
MacaroonInitException,
MacaroonInvalidSignatureException,
)
from typing_extensions import TypedDict

from twisted.web.client import readBody
from twisted.web.http_headers import Headers

from synapse.config import ConfigError
from synapse.config.oidc_config import (
OidcProviderClientSecretJwtKey,
OidcProviderConfig,
)
from synapse.config.oidc_config import OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
from synapse.util import Clock, json_decoder
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry

if TYPE_CHECKING:
from synapse.server import HomeServer

@@ -218,7 +211,7 @@ class OidcHandler:
session_data = self._token_generator.verify_oidc_session_token(
session, state
)
except (MacaroonInitException, MacaroonDeserializationException, KeyError) as e:
except (MacaroonDeserializationException, ValueError) as e:
logger.exception("Invalid session for OIDC callback")
self._sso_handler.render_error(request, "invalid_session", str(e))
return

@@ -282,21 +275,9 @@ class OidcProvider:

self._scopes = provider.scopes
self._user_profile_method = provider.user_profile_method

client_secret = None # type: Union[None, str, JwtClientSecret]
if provider.client_secret:
client_secret = provider.client_secret
elif provider.client_secret_jwt_key:
client_secret = JwtClientSecret(
provider.client_secret_jwt_key,
provider.client_id,
provider.issuer,
hs.get_clock(),
)

self._client_auth = ClientAuth(
provider.client_id,
client_secret,
provider.client_secret,
provider.client_auth_method,
) # type: ClientAuth
self._client_auth_method = provider.client_auth_method

@@ -331,9 +312,6 @@ class OidcProvider:
# optional brand identifier for this auth provider
self.idp_brand = provider.idp_brand

# Optional brand identifier for the unstable API (see MSC2858).
self.unstable_idp_brand = provider.unstable_idp_brand

self._sso_handler = hs.get_sso_handler()

self._sso_handler.register_identity_provider(self)

@@ -543,7 +521,7 @@ class OidcProvider:
"""
metadata = await self.load_metadata()
token_endpoint = metadata.get("token_endpoint")
raw_headers = {
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"User-Agent": self._http_client.user_agent,
"Accept": "application/json",

@@ -557,10 +535,10 @@ class OidcProvider:
body = urlencode(args, True)

# Fill the body/headers with credentials
uri, raw_headers, body = self._client_auth.prepare(
method="POST", uri=token_endpoint, headers=raw_headers, body=body
uri, headers, body = self._client_auth.prepare(
method="POST", uri=token_endpoint, headers=headers, body=body
)
headers = Headers({k: [v] for (k, v) in raw_headers.items()})
headers = {k: [v] for (k, v) in headers.items()}

# Do the actual request
# We're not using the SimpleHttpClient util methods as we don't want to

@@ -767,7 +745,7 @@ class OidcProvider:
idp_id=self.idp_id,
nonce=nonce,
client_redirect_url=client_redirect_url.decode(),
ui_auth_session_id=ui_auth_session_id or "",
ui_auth_session_id=ui_auth_session_id,
),
)

@@ -998,81 +976,6 @@ class OidcProvider:
return str(remote_user_id)

# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600

# minimum remaining validity on a client secret before we should generate a new one
CLIENT_SECRET_MIN_VALIDITY_SECONDS = 600

class JwtClientSecret:
"""A class which generates a new client secret on demand, based on a JWK

This implementation is designed to comply with the requirements for Apple Sign in:
https://developer.apple.com/documentation/sign_in_with_apple/generate_and_validate_tokens#3262048

It looks like those requirements are based on https://tools.ietf.org/html/rfc7523,
but it's worth noting that we still put the generated secret in the "client_secret"
field (or rather, wherever client_auth_method puts it) rather than in a
client_assertion field in the body as that RFC seems to require.
"""

def __init__(
self,
key: OidcProviderClientSecretJwtKey,
oauth_client_id: str,
oauth_issuer: str,
clock: Clock,
):
self._key = key
self._oauth_client_id = oauth_client_id
self._oauth_issuer = oauth_issuer
self._clock = clock
self._cached_secret = b""
self._cached_secret_replacement_time = 0

def __str__(self):
# if client_auth_method is client_secret_basic, then ClientAuth.prepare calls
# encode_client_secret_basic, which calls "{}".format(secret), which ends up
# here.
return self._get_secret().decode("ascii")

def __bytes__(self):
# if client_auth_method is client_secret_post, then ClientAuth.prepare calls
# encode_client_secret_post, which ends up here.
return self._get_secret()

def _get_secret(self) -> bytes:
now = self._clock.time()

# if we have enough validity on our existing secret, use it
if now < self._cached_secret_replacement_time:
return self._cached_secret

issued_at = int(now)
expires_at = issued_at + CLIENT_SECRET_VALIDITY_SECONDS

# we copy the configured header because jwt.encode modifies it.
header = dict(self._key.jwt_header)

# see https://tools.ietf.org/html/rfc7523#section-3
payload = {
"sub": self._oauth_client_id,
"aud": self._oauth_issuer,
"iat": issued_at,
"exp": expires_at,
**self._key.jwt_payload,
}
logger.info(
"Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
)
self._cached_secret = jwt.encode(header, payload, self._key.key)
self._cached_secret_replacement_time = (
expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS
)
return self._cached_secret
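
To see what JwtClientSecret emits, here is a hedged, standalone sketch of the RFC 7523 payload built with authlib's jwt directly; the key, client id and issuer are placeholders rather than real Apple Sign in credentials:

    import time
    from authlib.jose import jwt

    # assumption: an ES256 signing key, as Apple requires; "kid" identifies it.
    header = {"alg": "ES256", "kid": "example-key-id"}
    now = int(time.time())
    payload = {
        "sub": "example-client-id",          # the OAuth client id
        "aud": "https://appleid.apple.com",  # the issuer we authenticate to
        "iat": now,
        "exp": now + 3600,                   # CLIENT_SECRET_VALIDITY_SECONDS above
    }
    # With a real private key loaded:
    # client_secret = jwt.encode(dict(header), payload, private_key)  # -> bytes

Note the dict(header) copy: as the class's own comment warns, authlib's jwt.encode mutates the header it is given.
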
class OidcSessionTokenGenerator:
"""Methods for generating and checking OIDC Session cookies."""

@@ -1117,9 +1020,10 @@ class OidcSessionTokenGenerator:
macaroon.add_first_party_caveat(
"client_redirect_url = %s" % (session_data.client_redirect_url,)
)
macaroon.add_first_party_caveat(
"ui_auth_session_id = %s" % (session_data.ui_auth_session_id,)
)
if session_data.ui_auth_session_id:
macaroon.add_first_party_caveat(
"ui_auth_session_id = %s" % (session_data.ui_auth_session_id,)
)
now = self._clock.time_msec()
expiry = now + duration_in_ms
macaroon.add_first_party_caveat("time < %d" % (expiry,))

@@ -1142,7 +1046,7 @@ class OidcSessionTokenGenerator:
The data extracted from the session cookie

Raises:
KeyError if an expected caveat is missing from the macaroon.
ValueError if an expected caveat is missing from the macaroon.
"""
macaroon = pymacaroons.Macaroon.deserialize(session)

@@ -1153,16 +1057,26 @@ class OidcSessionTokenGenerator:
v.satisfy_general(lambda c: c.startswith("nonce = "))
v.satisfy_general(lambda c: c.startswith("idp_id = "))
v.satisfy_general(lambda c: c.startswith("client_redirect_url = "))
# Sometimes there's a UI auth session ID, it seems to be OK to attempt
# to always satisfy this.
v.satisfy_general(lambda c: c.startswith("ui_auth_session_id = "))
satisfy_expiry(v, self._clock.time_msec)
v.satisfy_general(self._verify_expiry)

v.verify(macaroon, self._macaroon_secret_key)

# Extract the session data from the token.
nonce = get_value_from_macaroon(macaroon, "nonce")
idp_id = get_value_from_macaroon(macaroon, "idp_id")
client_redirect_url = get_value_from_macaroon(macaroon, "client_redirect_url")
ui_auth_session_id = get_value_from_macaroon(macaroon, "ui_auth_session_id")
nonce = self._get_value_from_macaroon(macaroon, "nonce")
idp_id = self._get_value_from_macaroon(macaroon, "idp_id")
client_redirect_url = self._get_value_from_macaroon(
macaroon, "client_redirect_url"
)
try:
ui_auth_session_id = self._get_value_from_macaroon(
macaroon, "ui_auth_session_id"
) # type: Optional[str]
except ValueError:
ui_auth_session_id = None

return OidcSessionData(
nonce=nonce,
idp_id=idp_id,

@@ -1170,6 +1084,33 @@ class OidcSessionTokenGenerator:
ui_auth_session_id=ui_auth_session_id,
)

def _get_value_from_macaroon(self, macaroon: pymacaroons.Macaroon, key: str) -> str:
"""Extracts a caveat value from a macaroon token.

Args:
macaroon: the token
key: the key of the caveat to extract

Returns:
The extracted value

Raises:
ValueError: if the caveat was not in the macaroon
"""
prefix = key + " = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(prefix):
return caveat.caveat_id[len(prefix) :]
raise ValueError("No %s caveat in macaroon" % (key,))

def _verify_expiry(self, caveat: str) -> bool:
prefix = "time < "
if not caveat.startswith(prefix):
return False
expiry = int(caveat[len(prefix) :])
now = self._clock.time_msec()
return now < expiry

@attr.s(frozen=True, slots=True)
class OidcSessionData:

@@ -1184,8 +1125,8 @@ class OidcSessionData:
# The URL the client gave when it initiated the flow. ("" if this is a UI Auth)
client_redirect_url = attr.ib(type=str)

# The session ID of the ongoing UI Auth ("" if this is a login)
ui_auth_session_id = attr.ib(type=str)
# The session ID of the ongoing UI Auth (None if this is a login)
ui_auth_session_id = attr.ib(type=Optional[str], default=None)

UserAttributeDict = TypedDict(

@@ -285,7 +285,7 @@ class PaginationHandler:
except Exception:
f = Failure()
logger.error(
"[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore
"[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
finally:

@@ -274,25 +274,22 @@ class PresenceHandler(BasePresenceHandler):

self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")

if self._presence_enabled:
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
def run_timeout_handler():
return run_as_background_process(
"handle_presence_timeouts", self._handle_timeouts
)

self.clock.call_later(
30, self.clock.looping_call, run_timeout_handler, 5000
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
def run_timeout_handler():
return run_as_background_process(
"handle_presence_timeouts", self._handle_timeouts
)

def run_persister():
return run_as_background_process(
"persist_presence_changes", self._persist_unpersisted_changes
)
self.clock.call_later(30, self.clock.looping_call, run_timeout_handler, 5000)

self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000)
def run_persister():
return run_as_background_process(
"persist_presence_changes", self._persist_unpersisted_changes
)

self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000)
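
The comment explains the scheduling trick: delay the 5-second timeout loop by 30 seconds so clients disconnected by a restart can reconnect before being marked offline. The same delayed-start loop in plain twisted (note twisted's LoopingCall takes seconds where Synapse's clock.looping_call takes milliseconds):

    from twisted.internet import reactor, task

    def run_timeout_handler():
        print("checking presence timeouts")  # stand-in for the real handler

    loop = task.LoopingCall(run_timeout_handler)
    # First run after 30s, then every 5s.
    reactor.callLater(30, loop.start, 5.0)
    # reactor.run()  # uncomment to actually drive the loop
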
LaterGauge(
"synapse_handlers_presence_wheel_timer_size",

@@ -302,7 +299,7 @@ class PresenceHandler(BasePresenceHandler):
)

# Used to handle sending of presence to newly joined users/servers
if self._presence_enabled:
if hs.config.use_presence:
self.notifier.add_replication_callback(self.notify_new_event)

# Presence is best effort and quickly heals itself, so lets just always

@@ -16,9 +16,7 @@
"""Contains functions for registering clients."""

import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple

from prometheus_client import Counter
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

from synapse import types
from synapse.api.constants import MAX_USERID_LENGTH, EventTypes, JoinRules, LoginType

@@ -43,19 +41,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)

registration_counter = Counter(
"synapse_user_registrations_total",
"Number of new users registered (since restart)",
["guest", "shadow_banned", "auth_provider"],
)

login_counter = Counter(
"synapse_user_logins_total",
"Number of user logins (since restart)",
["guest", "auth_provider"],
)

class RegistrationHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)

@@ -82,7 +67,6 @@ class RegistrationHandler(BaseHandler):
)
else:
self.device_handler = hs.get_device_handler()
self._register_device_client = self.register_device_inner
self.pusher_pool = hs.get_pusherpool()

self.session_lifetime = hs.config.session_lifetime

@@ -172,7 +156,6 @@ class RegistrationHandler(BaseHandler):
bind_emails: Iterable[str] = [],
by_admin: bool = False,
user_agent_ips: Optional[List[Tuple[str, str]]] = None,
auth_provider_id: Optional[str] = None,
) -> str:
"""Registers a new client on the server.

@@ -198,9 +181,8 @@ class RegistrationHandler(BaseHandler):
admin api, otherwise False.
user_agent_ips: Tuples of IP addresses and user-agents used
during the registration process.
auth_provider_id: The SSO IdP the user used, if any.
Returns:
The registered user_id.
The registere user_id.
Raises:
SynapseError if there was a problem registering.
"""

@@ -210,7 +192,6 @@ class RegistrationHandler(BaseHandler):
threepid,
localpart,
user_agent_ips or [],
auth_provider_id=auth_provider_id,
)

if result == RegistrationBehaviour.DENY:

@@ -299,12 +280,6 @@ class RegistrationHandler(BaseHandler):
# if user id is taken, just generate another
fail_count += 1

registration_counter.labels(
guest=make_guest,
shadow_banned=shadow_banned,
auth_provider=(auth_provider_id or ""),
).inc()

if not self.hs.config.user_consent_at_registration:
if not self.hs.config.auto_join_rooms_for_guests and make_guest:
logger.info(

@@ -663,7 +638,6 @@ class RegistrationHandler(BaseHandler):
initial_display_name: Optional[str],
is_guest: bool = False,
is_appservice_ghost: bool = False,
auth_provider_id: Optional[str] = None,
) -> Tuple[str, str]:
"""Register a device for a user and generate an access token.

@@ -674,40 +648,21 @@ class RegistrationHandler(BaseHandler):
device_id: The device ID to check, or None to generate a new one.
initial_display_name: An optional display name for the device.
is_guest: Whether this is a guest account
auth_provider_id: The SSO IdP the user used, if any (just used for the
prometheus metrics).

Returns:
Tuple of device ID and access token
"""
res = await self._register_device_client(
user_id=user_id,
device_id=device_id,
initial_display_name=initial_display_name,
is_guest=is_guest,
is_appservice_ghost=is_appservice_ghost,
)

login_counter.labels(
guest=is_guest,
auth_provider=(auth_provider_id or ""),
).inc()
if self.hs.config.worker_app:
r = await self._register_device_client(
user_id=user_id,
device_id=device_id,
initial_display_name=initial_display_name,
is_guest=is_guest,
is_appservice_ghost=is_appservice_ghost,
)
return r["device_id"], r["access_token"]

return res["device_id"], res["access_token"]

async def register_device_inner(
self,
user_id: str,
device_id: Optional[str],
initial_display_name: Optional[str],
is_guest: bool = False,
is_appservice_ghost: bool = False,
) -> Dict[str, str]:
"""Helper for register_device

Does the bits that need doing on the main process. Not for use outside this
class and RegisterDeviceReplicationServlet.
"""
assert not self.hs.config.worker_app
valid_until_ms = None
if self.session_lifetime is not None:
if is_guest:

@@ -732,7 +687,7 @@ class RegistrationHandler(BaseHandler):
is_appservice_ghost=is_appservice_ghost,
)

return {"device_id": registered_device_id, "access_token": access_token}
return (registered_device_id, access_token)

async def post_registration_actions(
self, user_id: str, auth_result: dict, access_token: Optional[str]

@@ -121,7 +121,7 @@ class RoomCreationHandler(BaseHandler):
# succession, only process the first attempt and return its result to
# subsequent requests
self._upgrade_response_cache = ResponseCache(
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
hs, "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
) # type: ResponseCache[Tuple[str, str]]
self._server_notices_mxid = hs.config.server_notices_mxid

@@ -43,11 +43,12 @@ class RoomListHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.enable_room_list_search = hs.config.enable_room_list_search

self.response_cache = ResponseCache(
hs.get_clock(), "room_list"
hs, "room_list"
) # type: ResponseCache[Tuple[Optional[int], Optional[str], ThirdPartyInstanceID]]
self.remote_response_cache = ResponseCache(
hs.get_clock(), "remote_room_list", timeout_ms=30 * 1000
hs, "remote_room_list", timeout_ms=30 * 1000
) # type: ResponseCache[Tuple[str, Optional[int], Optional[str], bool, Optional[str]]]

async def get_local_public_room_list(

@@ -66,6 +66,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.account_data_handler = hs.get_account_data_handler()

self.member_linearizer = Linearizer(name="member")
self.member_limiter = Linearizer(max_count=10, name="member_as_limiter")

self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()

@@ -336,19 +337,38 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

key = (room_id,)

with (await self.member_linearizer.queue(key)):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
require_consent=require_consent,
)
as_id = object()
if requester.app_service:
as_id = requester.app_service.id

then = self.clock.time_msec()

with (await self.member_limiter.queue(as_id)):
diff = self.clock.time_msec() - then

if diff > 80 * 1000:
# haproxy would have timed the request out anyway...
raise SynapseError(504, "took too long to process")

with (await self.member_linearizer.queue(key)):
diff = self.clock.time_msec() - then

if diff > 80 * 1000:
# haproxy would have timed the request out anyway...
raise SynapseError(504, "took too long to process")

result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
require_consent=require_consent,
)

return result
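
The added code queues each membership update through two gates: a per-appservice limiter (a Linearizer admitting up to 10 concurrent entries) and then the per-room linearizer, giving up with a 504 once a request has already waited past the ~80s haproxy budget. The shape of that pattern, sketched with hypothetical names:

    from synapse.api.errors import SynapseError
    from synapse.util.async_helpers import Linearizer

    MAX_WAIT_MS = 80 * 1000  # assumption: mirrors the haproxy timeout noted above

    async def guarded_update(clock, limiter: Linearizer, linearizer: Linearizer, as_id, key):
        then = clock.time_msec()
        with (await limiter.queue(as_id)):       # throttle per appservice
            if clock.time_msec() - then > MAX_WAIT_MS:
                raise SynapseError(504, "took too long to process")
            with (await linearizer.queue(key)):  # serialise per room
                if clock.time_msec() - then > MAX_WAIT_MS:
                    raise SynapseError(504, "took too long to process")
                ...  # perform the membership update
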
@@ -81,7 +81,6 @@ class SamlHandler(BaseHandler):
# the SsoIdentityProvider protocol type.
self.idp_icon = None
self.idp_brand = None
self.unstable_idp_brand = None

# a map from saml session id to Saml2SessionData object
self._outstanding_requests_dict = {} # type: Dict[str, Saml2SessionData]

@@ -31,8 +31,8 @@ from urllib.parse import urlencode
import attr
from typing_extensions import NoReturn, Protocol

from twisted.web.http import Request
from twisted.web.iweb import IRequest
from twisted.web.server import Request

from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError

@@ -98,11 +98,6 @@ class SsoIdentityProvider(Protocol):
"""Optional branding identifier"""
return None

@property
def unstable_idp_brand(self) -> Optional[str]:
"""Optional brand identifier for the unstable API (see MSC2858)."""
return None

@abc.abstractmethod
async def handle_redirect_request(
self,

@@ -461,7 +456,6 @@ class SsoHandler:

await self._auth_handler.complete_sso_login(
user_id,
auth_provider_id,
request,
client_redirect_url,
extra_login_attributes,

@@ -611,7 +605,6 @@ class SsoHandler:
default_display_name=attributes.display_name,
bind_emails=attributes.emails,
user_agent_ips=[(user_agent, ip_address)],
auth_provider_id=auth_provider_id,
)

await self._store.record_user_external_id(

@@ -893,7 +886,6 @@ class SsoHandler:

await self._auth_handler.complete_sso_login(
user_id,
session.auth_provider_id,
request,
session.client_redirect_url,
session.extra_login_attributes,

@@ -52,6 +52,7 @@ logger = logging.getLogger(__name__)
# Debug logger for https://github.com/matrix-org/synapse/issues/4422
issue4422_logger = logging.getLogger("synapse.handler.sync.4422_debug")

SYNC_RESPONSE_CACHE_MS = 2 * 60 * 1000

# Counts the number of times we returned a non-empty sync. `type` is one of
# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is

@@ -244,7 +245,7 @@ class SyncHandler:
self.event_sources = hs.get_event_sources()
self.clock = hs.get_clock()
self.response_cache = ResponseCache(
hs.get_clock(), "sync"
hs, "sync", timeout_ms=SYNC_RESPONSE_CACHE_MS
) # type: ResponseCache[Tuple[Any, ...]]
self.state = hs.get_state_handler()
self.auth = hs.get_auth()

@@ -277,8 +278,9 @@ class SyncHandler:
user_id = sync_config.user.to_string()
await self.auth.check_auth_blocking(requester=requester)

res = await self.response_cache.wrap(
res = await self.response_cache.wrap_conditional(
sync_config.request_key,
lambda result: since_token != result.next_batch,
self._wait_for_sync_for_user,
sync_config,
since_token,
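
wrap_conditional extends plain wrap: the computed response is only retained in the cache when a predicate on the result holds, here "the sync response actually advanced past the client's since token". The idea, reduced to a dict-backed sketch (a hypothetical helper, not the ResponseCache API):

    from typing import Any, Awaitable, Callable, Dict

    _cache: Dict[Any, Any] = {}

    async def wrap_conditional(key, should_cache: Callable[[Any], bool],
                               compute: Callable[..., Awaitable[Any]], *args) -> Any:
        if key in _cache:
            return _cache[key]
        result = await compute(*args)
        # The caller decides, from the result itself, whether it is safe
        # to hand this response to other requests arriving under the same key.
        if should_cache(result):
            _cache[key] = result
        return result
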
@@ -39,15 +39,12 @@ from zope.interface import implementer, provider
from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE
from twisted.internet import defer, error as twisted_error, protocol, ssl
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.interfaces import (
IAddress,
IHostResolution,
IReactorPluggableNameResolver,
IResolutionReceiver,
ITCPTransport,
)
from twisted.internet.protocol import connectionDone
from twisted.internet.task import Cooperator
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone

@@ -59,20 +56,13 @@ from twisted.web.client import (
)
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
from twisted.web.iweb import (
UNKNOWN_LENGTH,
IAgent,
IBodyProducer,
IPolicyForHTTPS,
IResponse,
)
from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse

from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri
from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import ISynapseReactor
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred

@@ -160,17 +150,16 @@ class _IPBlacklistingResolver:
def resolveHostName(
self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
) -> IResolutionReceiver:

r = recv()
addresses = [] # type: List[IAddress]

def _callback() -> None:
has_bad_ip = False
for address in addresses:
# We only expect IPv4 and IPv6 addresses since only A/AAAA lookups
# should go through this path.
if not isinstance(address, (IPv4Address, IPv6Address)):
continue
r.resolutionBegan(None)

ip_address = IPAddress(address.host)
has_bad_ip = False
for i in addresses:
ip_address = IPAddress(i.host)

if check_against_blacklist(
ip_address, self._ip_whitelist, self._ip_blacklist

@@ -185,15 +174,15 @@ class _IPBlacklistingResolver:
# request, but all we can really do from here is claim that there were no
# valid results.
if not has_bad_ip:
for address in addresses:
recv.addressResolved(address)
recv.resolutionComplete()
for i in addresses:
r.addressResolved(i)
r.resolutionComplete()

@provider(IResolutionReceiver)
class EndpointReceiver:
@staticmethod
def resolutionBegan(resolutionInProgress: IHostResolution) -> None:
recv.resolutionBegan(resolutionInProgress)
pass

@staticmethod
def addressResolved(address: IAddress) -> None:

@@ -207,10 +196,10 @@ class _IPBlacklistingResolver:
EndpointReceiver, hostname, portNumber=portNumber
)

return recv
return r
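
The resolver rewrite hinges on checking each resolved address against configured whitelist/blacklist IPSets. That check, isolated into a runnable sketch with netaddr (the example ranges are illustrative, and the helper mirrors the check_against_blacklist call in the hunk under assumed semantics):

    from netaddr import IPAddress, IPSet

    ip_blacklist = IPSet(["10.0.0.0/8", "127.0.0.0/8"])
    ip_whitelist = IPSet(["10.1.2.3"])  # a carve-out from the blacklist

    def check_against_blacklist(ip, whitelist, blacklist):
        """True if the address is blacklisted and not explicitly whitelisted."""
        return ip in blacklist and ip not in whitelist

    assert check_against_blacklist(IPAddress("10.9.9.9"), ip_whitelist, ip_blacklist)
    assert not check_against_blacklist(IPAddress("10.1.2.3"), ip_whitelist, ip_blacklist)
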
@implementer(ISynapseReactor)
@implementer(IReactorPluggableNameResolver)
class BlacklistingReactorWrapper:
"""
A Reactor wrapper which will prevent DNS resolution to blacklisted IP

@@ -300,7 +289,8 @@ class SimpleHttpClient:
treq_args: Dict[str, Any] = {},
ip_whitelist: Optional[IPSet] = None,
ip_blacklist: Optional[IPSet] = None,
use_proxy: bool = False,
http_proxy: Optional[bytes] = None,
https_proxy: Optional[bytes] = None,
):
"""
Args:

@@ -310,8 +300,8 @@ class SimpleHttpClient:
we may not request.
ip_whitelist: The whitelisted IP addresses, that we can
request if it were otherwise caught in a blacklist.
use_proxy: Whether proxy settings should be discovered and used
from conventional environment variables.
http_proxy: proxy server to use for http connections. host[:port]
https_proxy: proxy server to use for https connections. host[:port]
"""
self.hs = hs

@@ -335,7 +325,7 @@ class SimpleHttpClient:
# filters out blacklisted IP addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
hs.get_reactor(), self._ip_whitelist, self._ip_blacklist
) # type: ISynapseReactor
)
else:
self.reactor = hs.get_reactor()

@@ -355,8 +345,9 @@ class SimpleHttpClient:
connectTimeout=15,
contextFactory=self.hs.get_http_client_context_factory(),
pool=pool,
use_proxy=use_proxy,
) # type: IAgent
http_proxy=http_proxy,
https_proxy=https_proxy,
)

if self._ip_blacklist:
# If we have an IP blacklist, we then install the blacklisting Agent

@@ -759,37 +750,7 @@ class BodyExceededMaxSize(Exception):
"""The maximum allowed size of the HTTP body was exceeded."""

class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which immediately errors upon receiving data."""

transport = None # type: Optional[ITCPTransport]

def __init__(self, deferred: defer.Deferred):
self.deferred = deferred

def _maybe_fail(self):
"""
Report a max size exceed error and disconnect the first time this is called.
"""
if not self.deferred.called:
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
self.transport.abortConnection()

def dataReceived(self, data: bytes) -> None:
self._maybe_fail()

def connectionLost(self, reason: Failure = connectionDone) -> None:
self._maybe_fail()

class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""

transport = None # type: Optional[ITCPTransport]

def __init__(
self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int]
):

@@ -812,10 +773,9 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
self.transport.abortConnection()

def connectionLost(self, reason: Failure = connectionDone) -> None:
def connectionLost(self, reason: Failure) -> None:
# If the maximum size was already exceeded, there's nothing to do.
if self.deferred.called:
return

@@ -847,15 +807,13 @@ def read_body_with_max_size(
Returns:
A Deferred which resolves to the length of the read body.
"""
d = defer.Deferred()

# If the Content-Length header gives a size larger than the maximum allowed
# size, do not bother downloading the body.
if max_size is not None and response.length != UNKNOWN_LENGTH:
if response.length > max_size:
response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
return d
return defer.fail(BodyExceededMaxSize())

d = defer.Deferred()
response.deliverBody(_ReadBodyWithMaxSizeProtocol(stream, d, max_size))
return d

@@ -884,7 +842,6 @@ def encode_query_args(args: Optional[Mapping[str, Union[str, List[str]]]]) -> by
return query_str.encode("utf8")

@implementer(IPolicyForHTTPS)
class InsecureInterceptableContextFactory(ssl.ContextFactory):
"""
Factory for PyOpenSSL SSL contexts which accepts any certificate for any domain.

@@ -14,7 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
from typing import Any, Generator, List, Optional
from typing import List, Optional

from netaddr import AddrFormatError, IPAddress, IPSet
from zope.interface import implementer

@@ -35,7 +35,6 @@ from synapse.http.client import BlacklistingAgentWrapper
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import ISynapseReactor
from synapse.util import Clock

logger = logging.getLogger(__name__)

@@ -69,7 +68,7 @@ class MatrixFederationAgent:

def __init__(
self,
reactor: ISynapseReactor,
reactor: IReactorCore,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
user_agent: bytes,
ip_blacklist: IPSet,

@@ -117,7 +116,7 @@ class MatrixFederationAgent:
uri: bytes,
headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None,
) -> Generator[defer.Deferred, Any, defer.Deferred]:
) -> defer.Deferred:
"""
Args:
method: HTTP method: GET/POST/etc

@@ -178,17 +177,17 @@ class MatrixFederationAgent:
# We need to make sure the host header is set to the netloc of the
# server and that a user-agent is provided.
if headers is None:
request_headers = Headers()
headers = Headers()
else:
request_headers = headers.copy()
headers = headers.copy()

if not request_headers.hasHeader(b"host"):
request_headers.addRawHeader(b"host", parsed_uri.netloc)
if not request_headers.hasHeader(b"user-agent"):
request_headers.addRawHeader(b"user-agent", self.user_agent)
if not headers.hasHeader(b"host"):
headers.addRawHeader(b"host", parsed_uri.netloc)
if not headers.hasHeader(b"user-agent"):
headers.addRawHeader(b"user-agent", self.user_agent)

res = yield make_deferred_yieldable(
self._agent.request(method, uri, request_headers, bodyProducer)
self._agent.request(method, uri, headers, bodyProducer)
)

return res

@@ -322,8 +322,7 @@ def _cache_period_from_headers(

def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]:
cache_controls = {}
cache_control_headers = headers.getRawHeaders(b"cache-control") or []
for hdr in cache_control_headers:
for hdr in headers.getRawHeaders(b"cache-control", []):
for directive in hdr.split(b","):
splits = [x.strip() for x in directive.split(b"=", 1)]
k = splits[0].lower()
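
A worked example of the parse loop above: each cache-control header splits on commas into directives, each directive on the first "=" into a key and optional value, with bare directives mapped to None:

    hdr = b"max-age=3600, private"  # as in: Cache-Control: max-age=3600, private

    cache_controls = {}
    for directive in hdr.split(b","):
        splits = [x.strip() for x in directive.split(b"=", 1)]
        k = splits[0].lower()
        cache_controls[k] = splits[1] if len(splits) > 1 else None

    assert cache_controls == {b"max-age": b"3600", b"private": None}
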
@@ -59,7 +59,7 @@ from synapse.logging.opentracing import (
start_active_span,
tags,
)
from synapse.types import ISynapseReactor, JsonDict
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure

@@ -237,14 +237,14 @@ class MatrixFederationHttpClient:
# addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
hs.get_reactor(), None, hs.config.federation_ip_range_blacklist
) # type: ISynapseReactor
)

user_agent = hs.version_string
if hs.config.user_agent_suffix:
user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix)
user_agent = user_agent.encode("ascii")

federation_agent = MatrixFederationAgent(
self.agent = MatrixFederationAgent(
self.reactor,
tls_client_options_factory,
user_agent,

@@ -254,7 +254,7 @@ class MatrixFederationHttpClient:
# Use a BlacklistingAgentWrapper to prevent circumventing the IP
# blacklist via IP literals in server names
self.agent = BlacklistingAgentWrapper(
federation_agent,
self.agent,
ip_blacklist=hs.config.federation_ip_range_blacklist,
)

@@ -534,10 +534,9 @@ class MatrixFederationHttpClient:
response.code, response_phrase, body
)

# Retry if the error is a 5xx or a 429 (Too Many
# Requests), otherwise just raise a standard
# `HttpResponseException`
if 500 <= response.code < 600 or response.code == 429:
# Retry if the error is a 429 (Too Many Requests),
# otherwise just raise a standard HttpResponseException
if response.code == 429:
raise RequestSendFailed(exc, can_retry=True) from exc
else:
raise exc
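
The hunk narrows the retry policy: one side retries any 5xx as well as 429 (Too Many Requests), the other retries only 429. Either way the decision is a small predicate over the status code; a sketch covering both variants:

    def can_retry(status_code: int, include_server_errors: bool = True) -> bool:
        """Whether a failed federation request is worth retrying."""
        if status_code == 429:  # the remote explicitly asked us to back off
            return True
        # 5xx responses are (usually transient) server-side failures.
        return include_server_errors and 500 <= status_code < 600

    assert can_retry(502) and can_retry(429)
    assert not can_retry(502, include_server_errors=False)
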
@@ -1050,14 +1049,14 @@ def check_content_type_is_json(headers: Headers) -> None:
RequestSendFailed: if the Content-Type header is missing or isn't JSON

"""
content_type_headers = headers.getRawHeaders(b"Content-Type")
if content_type_headers is None:
c_type = headers.getRawHeaders(b"Content-Type")
if c_type is None:
raise RequestSendFailed(
RuntimeError("No Content-Type header received from remote server"),
can_retry=False,
)

c_type = content_type_headers[0].decode("ascii") # only the first header
c_type = c_type[0].decode("ascii") # only the first header
val, options = cgi.parse_header(c_type)
if val != "application/json":
raise RequestSendFailed(

@@ -14,7 +14,6 @@
# limitations under the License.
import logging
import re
from urllib.request import getproxies_environment, proxy_bypass_environment

from zope.interface import implementer

@@ -59,9 +58,6 @@ class ProxyAgent(_AgentBase):

pool (HTTPConnectionPool|None): connection pool to be used. If None, a
non-persistent pool instance will be created.

use_proxy (bool): Whether proxy settings should be discovered and used
from conventional environment variables.
"""

def __init__(

@@ -72,7 +68,8 @@ class ProxyAgent(_AgentBase):
connectTimeout=None,
bindAddress=None,
pool=None,
use_proxy=False,
http_proxy=None,
https_proxy=None,
):
_AgentBase.__init__(self, reactor, pool)

@@ -87,15 +84,6 @@ class ProxyAgent(_AgentBase):
if bindAddress is not None:
self._endpoint_kwargs["bindAddress"] = bindAddress

http_proxy = None
https_proxy = None
no_proxy = None
if use_proxy:
proxies = getproxies_environment()
http_proxy = proxies["http"].encode() if "http" in proxies else None
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None

self.http_proxy_endpoint = _http_proxy_endpoint(
http_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
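
With use_proxy=True the agent discovers proxies from the conventional environment variables rather than taking explicit constructor arguments. What the two stdlib helpers return, sketched with illustrative values:

    import os
    from urllib.request import getproxies_environment, proxy_bypass_environment

    os.environ["http_proxy"] = "http://proxy.example.com:8888"
    os.environ["no_proxy"] = "localhost,.internal.example.com"

    proxies = getproxies_environment()
    # -> {"http": "http://proxy.example.com:8888", "no": "localhost,.internal.example.com"}
    http_proxy = proxies["http"].encode() if "http" in proxies else None

    # Hosts matched by no_proxy bypass the proxy entirely (prints a truthy value):
    print(proxy_bypass_environment("localhost", proxies={"no": proxies.get("no", "")}))
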
@@ -104,8 +92,6 @@ class ProxyAgent(_AgentBase):
|
||||
https_proxy, self.proxy_reactor, **self._endpoint_kwargs
|
||||
)
|
||||
|
||||
self.no_proxy = no_proxy
|
||||
|
||||
self._policy_for_https = contextFactory
|
||||
self._reactor = reactor

@@ -153,28 +139,13 @@ class ProxyAgent(_AgentBase):
        pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
        request_path = parsed_uri.originForm

        should_skip_proxy = False
        if self.no_proxy is not None:
            should_skip_proxy = proxy_bypass_environment(
                parsed_uri.host.decode(),
                proxies={"no": self.no_proxy},
            )

        if (
            parsed_uri.scheme == b"http"
            and self.http_proxy_endpoint
            and not should_skip_proxy
        ):
        if parsed_uri.scheme == b"http" and self.http_proxy_endpoint:
            # Cache *all* connections under the same key, since we are only
            # connecting to a single destination, the proxy:
            pool_key = ("http-proxy", self.http_proxy_endpoint)
            endpoint = self.http_proxy_endpoint
            request_path = uri
        elif (
            parsed_uri.scheme == b"https"
            and self.https_proxy_endpoint
            and not should_skip_proxy
        ):
        elif parsed_uri.scheme == b"https" and self.https_proxy_endpoint:
            endpoint = HTTPConnectProxyEndpoint(
                self.proxy_reactor,
                self.https_proxy_endpoint,

@@ -21,7 +21,6 @@ import logging
import types
import urllib
from http import HTTPStatus
from inspect import isawaitable
from io import BytesIO
from typing import (
    Any,

@@ -31,7 +30,6 @@ from typing import (
    Iterable,
    Iterator,
    List,
    Optional,
    Pattern,
    Tuple,
    Union,

@@ -81,12 +79,10 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
    """Sends a JSON error response to clients."""

    if f.check(SynapseError):
        # mypy doesn't understand that f.check asserts the type.
        exc = f.value  # type: SynapseError  # type: ignore
        error_code = exc.code
        error_dict = exc.error_dict()
        error_code = f.value.code
        error_dict = f.value.error_dict()

        logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg)
        logger.info("%s SynapseError: %s - %s", request, error_code, f.value.msg)
    else:
        error_code = 500
        error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN}

@@ -95,7 +91,7 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
            "Failed handle request via %r: %r",
            request.request_metrics.name,
            request,
            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
            exc_info=(f.type, f.value, f.getTracebackObject()),
        )

    # Only respond with an error response if we haven't already started writing,

@@ -132,8 +128,7 @@ def return_html_error(
            `{msg}` placeholders), or a jinja2 template
    """
    if f.check(CodeMessageException):
        # mypy doesn't understand that f.check asserts the type.
        cme = f.value  # type: CodeMessageException  # type: ignore
        cme = f.value
        code = cme.code
        msg = cme.msg

@@ -147,7 +142,7 @@ def return_html_error(
            logger.error(
                "Failed handle request %r",
                request,
                exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
    else:
        code = HTTPStatus.INTERNAL_SERVER_ERROR

@@ -156,7 +151,7 @@ def return_html_error(
        logger.error(
            "Failed handle request %r",
            request,
            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
            exc_info=(f.type, f.value, f.getTracebackObject()),
        )

    if isinstance(error_template, str):

@@ -283,7 +278,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
            raw_callback_return = method_handler(request)

            # Is it synchronous? We'll allow this for now.
            if isawaitable(raw_callback_return):
            if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)):
                callback_return = await raw_callback_return
            else:
                callback_return = raw_callback_return  # type: ignore
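The two guards are close to equivalent: `inspect.isawaitable` already accepts both coroutine objects and Twisted `Deferred`s, because `Deferred` implements `__await__`. A quick sketch:

from inspect import isawaitable

from twisted.internet import defer

async def handler():
    return "ok"

coro = handler()
print(isawaitable(coro))              # True - a coroutine object
print(isawaitable(defer.Deferred()))  # True - Deferred implements __await__
print(isawaitable("ok"))              # False - a plain synchronous result

coro.close()  # avoid the "never awaited" warning in this sketch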

@@ -404,10 +399,8 @@ class JsonResource(DirectServeJsonResource):
            A tuple of the callback to use, the name of the servlet, and the
            key word arguments to pass to the callback
        """
        # At this point the path must be bytes.
        request_path_bytes = request.path  # type: bytes  # type: ignore
        request_path = request_path_bytes.decode("ascii")
        # Treat HEAD requests as GET requests.
        request_path = request.path.decode("ascii")
        request_method = request.method
        if request_method == b"HEAD":
            request_method = b"GET"

@@ -558,7 +551,7 @@ class _ByteProducer:
        request: Request,
        iterator: Iterator[bytes],
    ):
        self._request = request  # type: Optional[Request]
        self._request = request
        self._iterator = iterator
        self._paused = False

@@ -570,7 +563,7 @@ class _ByteProducer:
        """
        Send a list of bytes as a chunk of a response.
        """
        if not data or not self._request:
        if not data:
            return
        self._request.write(b"".join(data))

@@ -14,7 +14,7 @@
import contextlib
import logging
import time
from typing import Optional, Type, Union
from typing import Optional, Union

import attr
from zope.interface import implementer

@@ -57,7 +57,7 @@ class SynapseRequest(Request):

    def __init__(self, channel, *args, **kw):
        Request.__init__(self, channel, *args, **kw)
        self.site = channel.site  # type: SynapseSite
        self.site = channel.site
        self._channel = channel  # this is used by the tests
        self.start_time = 0.0

@@ -96,34 +96,25 @@ class SynapseRequest(Request):
    def get_request_id(self):
        return "%s-%i" % (self.get_method(), self.request_seq)

    def get_redacted_uri(self) -> str:
        """Gets the redacted URI associated with the request (or placeholder if the URI
        has not yet been received).

        Note: This is necessary as the placeholder value in twisted is str
        rather than bytes, so we need to sanitise `self.uri`.

        Returns:
            The redacted URI as a string.
        """
        uri = self.uri  # type: Union[bytes, str]
    def get_redacted_uri(self):
        uri = self.uri
        if isinstance(uri, bytes):
            uri = uri.decode("ascii", errors="replace")
            uri = self.uri.decode("ascii", errors="replace")
        return redact_uri(uri)

    def get_method(self) -> str:
        """Gets the method associated with the request (or placeholder if method
        has not yet been received).
    def get_method(self):
        """Gets the method associated with the request (or placeholder if not
        method has yet been received).

        Note: This is necessary as the placeholder value in twisted is str
        rather than bytes, so we need to sanitise `self.method`.

        Returns:
            The request method as a string.
            str
        """
        method = self.method  # type: Union[bytes, str]
        method = self.method
        if isinstance(method, bytes):
            return self.method.decode("ascii")
            method = self.method.decode("ascii")
        return method

    def render(self, resrc):

@@ -384,9 +375,9 @@ class XForwardedForRequest(SynapseRequest):
        else:
            # this is done largely for backwards-compatibility so that people that
            # haven't set an x-forwarded-proto header don't get a redirect loop.
            logger.warning(
                "forwarded request lacks an x-forwarded-proto header: assuming https"
            )
            #logger.warning(
            # "forwarded request lacks an x-forwarded-proto header: assuming https"
            #)
            self._forwarded_https = True

    def isSecure(self):

@@ -441,9 +432,7 @@ class SynapseSite(Site):

        assert config.http_options is not None
        proxied = config.http_options.x_forwarded
        self.requestFactory = (
            XForwardedForRequest if proxied else SynapseRequest
        )  # type: Type[Request]
        self.requestFactory = XForwardedForRequest if proxied else SynapseRequest
        self.access_logger = logging.getLogger(logger_name)
        self.server_version_string = server_version_string.encode("ascii")

@@ -32,9 +32,8 @@ from twisted.internet.endpoints import (
    TCP4ClientEndpoint,
    TCP6ClientEndpoint,
)
from twisted.internet.interfaces import IPushProducer, IStreamClientEndpoint
from twisted.internet.interfaces import IPushProducer, ITransport
from twisted.internet.protocol import Factory, Protocol
from twisted.internet.tcp import Connection
from twisted.python.failure import Failure

logger = logging.getLogger(__name__)

@@ -53,9 +52,7 @@ class LogProducer:
        format: A callable to format the log record to a string.
    """

    # This is essentially ITCPTransport, but that is missing certain fields
    # (connected and registerProducer) which are part of the implementation.
    transport = attr.ib(type=Connection)
    transport = attr.ib(type=ITransport)
    _format = attr.ib(type=Callable[[logging.LogRecord], str])
    _buffer = attr.ib(type=deque)
    _paused = attr.ib(default=False, type=bool, init=False)

@@ -124,9 +121,7 @@ class RemoteHandler(logging.Handler):
        try:
            ip = ip_address(self.host)
            if isinstance(ip, IPv4Address):
                endpoint = TCP4ClientEndpoint(
                    _reactor, self.host, self.port
                )  # type: IStreamClientEndpoint
                endpoint = TCP4ClientEndpoint(_reactor, self.host, self.port)
            elif isinstance(ip, IPv6Address):
                endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port)
            else:
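The dispatch above hinges on `ipaddress.ip_address`, which parses a literal into an `IPv4Address` or `IPv6Address` (and rejects hostnames). A quick stdlib sketch:

from ipaddress import ip_address

print(type(ip_address("10.0.0.1")).__name__)  # IPv4Address
print(type(ip_address("::1")).__name__)       # IPv6Address

try:
    ip_address("logs.example.com")  # hostnames are not accepted
except ValueError as e:
    print(e)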

@@ -152,6 +147,8 @@ class RemoteHandler(logging.Handler):
        if self._connection_waiter:
            return

        self._connection_waiter = self._service.whenConnected(failAfterFailures=1)

        def fail(failure: Failure) -> None:
            # If the Deferred was cancelled (e.g. during shutdown) do not try to
            # reconnect (this will cause an infinite loop of errors).

@@ -164,13 +161,9 @@ class RemoteHandler(logging.Handler):
            self._connect()

        def writer(result: Protocol) -> None:
            # Force recognising transport as a Connection and not the more
            # generic ITransport.
            transport = result.transport  # type: Connection  # type: ignore

            # We have a connection. If we already have a producer, and its
            # transport is the same, just trigger a resumeProducing.
            if self._producer and transport is self._producer.transport:
            if self._producer and result.transport is self._producer.transport:
                self._producer.resumeProducing()
                self._connection_waiter = None
                return

@@ -182,16 +175,14 @@ class RemoteHandler(logging.Handler):
            # Make a new producer and start it.
            self._producer = LogProducer(
                buffer=self._buffer,
                transport=transport,
                transport=result.transport,
                format=self.format,
            )
            transport.registerProducer(self._producer, True)
            result.transport.registerProducer(self._producer, True)
            self._producer.resumeProducing()
            self._connection_waiter = None

        deferred = self._service.whenConnected(failAfterFailures=1)  # type: Deferred
        deferred.addCallbacks(writer, fail)
        self._connection_waiter = deferred
        self._connection_waiter.addCallbacks(writer, fail)
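Both variants hang the reconnect logic off Twisted's `ClientService.whenConnected`, which returns a `Deferred` that fires with the connected protocol; `failAfterFailures=1` makes it errback after a single failed attempt instead of retrying indefinitely. A minimal sketch with a hypothetical host and port:

from twisted.application.internet import ClientService
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol

endpoint = TCP4ClientEndpoint(reactor, "127.0.0.1", 9020)
service = ClientService(endpoint, Factory.forProtocol(Protocol))
service.startService()

# Fires with the protocol on success, or a Failure after one failed attempt.
d = service.whenConnected(failAfterFailures=1)

def report(result):
    print("result:", result)
    reactor.stop()

d.addBoth(report)
reactor.run()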

    def _handle_pressure(self) -> None:
        """

@@ -669,7 +669,7 @@ def preserve_fn(f):
    return g


def run_in_background(f, *args, **kwargs) -> defer.Deferred:
def run_in_background(f, *args, **kwargs):
    """Calls a function, ensuring that the current context is restored after
    return from the function, and that the sentinel context is set once the
    deferred returned by the function completes.

@@ -697,10 +697,8 @@ def run_in_background(f, *args, **kwargs) -> defer.Deferred:
    if isinstance(res, types.CoroutineType):
        res = defer.ensureDeferred(res)

    # At this point we should have a Deferred, if not then f was a synchronous
    # function, wrap it in a Deferred for consistency.
    if not isinstance(res, defer.Deferred):
        return defer.succeed(res)
        return res

    if res.called and not res.paused:
        # The function should have maintained the logcontext, so we can

@@ -527,7 +527,7 @@ class ReactorLastSeenMetric:
REGISTRY.register(ReactorLastSeenMetric())


def runUntilCurrentTimer(reactor, func):
def runUntilCurrentTimer(func):
    @functools.wraps(func)
    def f(*args, **kwargs):
        now = reactor.seconds()

@@ -590,14 +590,13 @@ def runUntilCurrentTimer(reactor, func):

try:
    # Ensure the reactor has all the attributes we expect
    reactor.seconds  # type: ignore
    reactor.runUntilCurrent  # type: ignore
    reactor._newTimedCalls  # type: ignore
    reactor.threadCallQueue  # type: ignore
    reactor.runUntilCurrent
    reactor._newTimedCalls
    reactor.threadCallQueue

    # runUntilCurrent is called when we have pending calls. It is called once
    # per iteration after fd polling.
    reactor.runUntilCurrent = runUntilCurrentTimer(reactor, reactor.runUntilCurrent)  # type: ignore
    reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)

    # We manually run the GC each reactor tick so that we can get some metrics
    # about time spent doing GC,
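The surrounding code wraps and reassigns a live method on the reactor. A minimal sketch of that wrap-and-reassign pattern, with a hypothetical `Loop` class standing in for the Twisted reactor:

import functools
import time

def timed(func):
    @functools.wraps(func)
    def f(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            print("%s took %.6fs" % (func.__name__, time.perf_counter() - start))
    return f

class Loop:
    def run_until_current(self):
        time.sleep(0.01)

loop = Loop()
# Same monkey-patching style as the reactor instrumentation above:
loop.run_until_current = timed(loop.run_until_current)
loop.run_until_current()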

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Generator, Iterable, Optional, Tuple
from typing import TYPE_CHECKING, Iterable, Optional, Tuple

from twisted.internet import defer

@@ -203,26 +203,11 @@ class ModuleApi:
        )

    def generate_short_term_login_token(
        self,
        user_id: str,
        duration_in_ms: int = (2 * 60 * 1000),
        auth_provider_id: str = "",
        self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
    ) -> str:
        """Generate a login token suitable for m.login.token authentication

        Args:
            user_id: gives the ID of the user that the token is for

            duration_in_ms: the time that the token will be valid for

            auth_provider_id: the ID of the SSO IdP that the user used to authenticate
                to get this token, if any. This is encoded in the token so that
                /login can report stats on number of successful logins by IdP.
        """
        """Generate a login token suitable for m.login.token authentication"""
        return self._hs.get_macaroon_generator().generate_short_term_login_token(
            user_id,
            auth_provider_id,
            duration_in_ms,
            user_id, duration_in_ms
        )
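A hypothetical module using this API would call it roughly as below; the resulting token is then consumed by the standard `m.login.token` login flow (`POST /_matrix/client/r0/login` with `{"type": "m.login.token", "token": token}`):

from synapse.module_api import ModuleApi

def issue_login_token(api: ModuleApi) -> str:
    # `api` is the ModuleApi instance Synapse hands to a third-party module;
    # the user ID and duration here are illustrative.
    return api.generate_short_term_login_token(
        "@alice:example.com",
        duration_in_ms=5 * 60 * 1000,  # valid for five minutes
    )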

    @defer.inlineCallbacks

@@ -291,7 +276,6 @@ class ModuleApi:
        """
        self._auth_handler._complete_sso_login(
            registered_user_id,
            "<unknown>",
            request,
            client_redirect_url,
        )

@@ -302,7 +286,6 @@ class ModuleApi:
        request: SynapseRequest,
        client_redirect_url: str,
        new_user: bool = False,
        auth_provider_id: str = "<unknown>",
    ):
        """Complete a SSO login by redirecting the user to a page to confirm whether they
        want their access token sent to `client_redirect_url`, or redirect them to that

@@ -316,21 +299,15 @@ class ModuleApi:
                redirect them directly if whitelisted).
            new_user: set to true to use wording for the consent appropriate to a user
                who has just registered.
            auth_provider_id: the ID of the SSO IdP which was used to log in. This
                is used to track counts of successful logins by IdP.
        """
        await self._auth_handler.complete_sso_login(
            registered_user_id,
            auth_provider_id,
            request,
            client_redirect_url,
            new_user=new_user,
            registered_user_id, request, client_redirect_url, new_user=new_user
        )

    @defer.inlineCallbacks
    def get_state_events_in_room(
        self, room_id: str, types: Iterable[Tuple[str, Optional[str]]]
    ) -> Generator[defer.Deferred, Any, defer.Deferred]:
    ) -> defer.Deferred:
        """Gets current state events for the given room.

        (This is exposed for compatibility with the old SpamCheckerApi. We should
@@ -16,8 +16,8 @@
import logging
from typing import TYPE_CHECKING, Dict, List, Optional

from twisted.internet.base import DelayedCall
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.internet.interfaces import IDelayedCall

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, ThrottleParams

@@ -66,7 +66,7 @@ class EmailPusher(Pusher):

        self.store = self.hs.get_datastore()
        self.email = pusher_config.pushkey
        self.timed_call = None  # type: Optional[IDelayedCall]
        self.timed_call = None  # type: Optional[DelayedCall]
        self.throttle_params = {}  # type: Dict[str, ThrottleParams]
        self._inited = False


@@ -15,12 +15,11 @@
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union
from typing import TYPE_CHECKING, Any, Dict, Iterable, Union

from prometheus_client import Counter

from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.internet.interfaces import IDelayedCall

from synapse.api.constants import EventTypes
from synapse.events import EventBase

@@ -72,7 +71,7 @@ class HttpPusher(Pusher):
        self.data = pusher_config.data
        self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
        self.failing_since = pusher_config.failing_since
        self.timed_call = None  # type: Optional[IDelayedCall]
        self.timed_call = None
        self._is_processing = False
        self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
        self._pusherpool = hs.get_pusherpool()

@@ -102,6 +101,11 @@ class HttpPusher(Pusher):
                "'url' must have a path of '/_matrix/push/v1/notify'"
            )

        url = url.replace(
            "https://matrix.org/_matrix/push/v1/notify",
            "http://10.103.0.7/_matrix/push/v1/notify",
        )

        self.url = url
        self.http_client = hs.get_proxied_blacklisted_http_client()
        self.data_minus_url = {}

@@ -18,7 +18,7 @@ import logging
import re
import urllib
from inspect import signature
from typing import TYPE_CHECKING, Dict, List, Tuple
from typing import Dict, List, Tuple

from prometheus_client import Counter, Gauge

@@ -28,9 +28,6 @@ from synapse.logging.opentracing import inject_active_span_byte_dict, trace
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

_pending_outgoing_requests = Gauge(

@@ -91,10 +88,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
    CACHE = True
    RETRY_ON_TIMEOUT = True

    def __init__(self, hs: "HomeServer"):
    def __init__(self, hs):
        if self.CACHE:
            self.response_cache = ResponseCache(
                hs.get_clock(), "repl." + self.NAME, timeout_ms=30 * 60 * 1000
                hs, "repl." + self.NAME, timeout_ms=30 * 60 * 1000
            )  # type: ResponseCache[str]

        # We reserve `instance_name` as a parameter to sending requests, so we

@@ -61,7 +61,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
        is_guest = content["is_guest"]
        is_appservice_ghost = content["is_appservice_ghost"]

        res = await self.registration_handler.register_device_inner(
        device_id, access_token = await self.registration_handler.register_device(
            user_id,
            device_id,
            initial_display_name,

@@ -69,7 +69,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
            is_appservice_ghost=is_appservice_ghost,
        )

        return 200, res
        return 200, {"device_id": device_id, "access_token": access_token}


def register_servlets(hs, http_server):
@@ -15,10 +15,9 @@
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple

from twisted.web.server import Request
from twisted.web.http import Request

from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
from synapse.util.distributor import user_left_room

@@ -79,7 +78,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
    }

    async def _handle_request(  # type: ignore
        self, request: SynapseRequest, room_id: str, user_id: str
        self, request: Request, room_id: str, user_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

@@ -87,6 +86,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
        event_content = content["content"]

        requester = Requester.deserialize(self.store, content["requester"])

        request.requester = requester

        logger.info("remote_join: %s into room: %s", user_id, room_id)

@@ -147,7 +147,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
    }

    async def _handle_request(  # type: ignore
        self, request: SynapseRequest, invite_event_id: str
        self, request: Request, invite_event_id: str
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

@@ -155,6 +155,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
        event_content = content["content"]

        requester = Requester.deserialize(self.store, content["requester"])

        request.requester = requester

        # hopefully we're now on the master, so this won't recurse!

@@ -108,7 +108,9 @@ class ReplicationDataHandler:

        # Map from stream to list of deferreds waiting for the stream to
        # arrive at a particular position. The lists are sorted by stream position.
        self._streams_to_waiters = {}  # type: Dict[str, List[Tuple[int, Deferred]]]
        self._streams_to_waiters = (
            {}
        )  # type: Dict[str, List[Tuple[int, Deferred[None]]]]

    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
@@ -48,7 +48,7 @@ from synapse.replication.tcp.commands import (
    UserIpCommand,
    UserSyncCommand,
)
from synapse.replication.tcp.protocol import IReplicationConnection
from synapse.replication.tcp.protocol import AbstractConnection
from synapse.replication.tcp.streams import (
    STREAMS_MAP,
    AccountDataStream,

@@ -82,7 +82,7 @@ user_ip_cache_counter = Counter("synapse_replication_tcp_resource_user_ip_cache"

# the type of the entries in _command_queues_by_stream
_StreamCommandQueue = Deque[
    Tuple[Union[RdataCommand, PositionCommand], IReplicationConnection]
    Tuple[Union[RdataCommand, PositionCommand], AbstractConnection]
]


@@ -174,7 +174,7 @@ class ReplicationCommandHandler:

        # The currently connected connections. (The list of places we need to send
        # outgoing replication commands to.)
        self._connections = []  # type: List[IReplicationConnection]
        self._connections = []  # type: List[AbstractConnection]

        LaterGauge(
            "synapse_replication_tcp_resource_total_connections",

@@ -197,7 +197,7 @@ class ReplicationCommandHandler:

        # For each connection, the incoming stream names that have received a POSITION
        # from that connection.
        self._streams_by_connection = {}  # type: Dict[IReplicationConnection, Set[str]]
        self._streams_by_connection = {}  # type: Dict[AbstractConnection, Set[str]]

        LaterGauge(
            "synapse_replication_tcp_command_queue",

@@ -220,7 +220,7 @@ class ReplicationCommandHandler:
        self._server_notices_sender = hs.get_server_notices_sender()

    def _add_command_to_stream_queue(
        self, conn: IReplicationConnection, cmd: Union[RdataCommand, PositionCommand]
        self, conn: AbstractConnection, cmd: Union[RdataCommand, PositionCommand]
    ) -> None:
        """Queue the given received command for processing

@@ -267,7 +267,7 @@ class ReplicationCommandHandler:
    async def _process_command(
        self,
        cmd: Union[PositionCommand, RdataCommand],
        conn: IReplicationConnection,
        conn: AbstractConnection,
        stream_name: str,
    ) -> None:
        if isinstance(cmd, PositionCommand):

@@ -302,7 +302,7 @@ class ReplicationCommandHandler:
                hs, outbound_redis_connection
            )
            hs.get_reactor().connectTCP(
                hs.config.redis.redis_host.encode(),
                hs.config.redis.redis_host,
                hs.config.redis.redis_port,
                self._factory,
            )

@@ -311,7 +311,7 @@ class ReplicationCommandHandler:
            self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
            host = hs.config.worker_replication_host
            port = hs.config.worker_replication_port
            hs.get_reactor().connectTCP(host.encode(), port, self._factory)
            hs.get_reactor().connectTCP(host, port, self._factory)

    def get_streams(self) -> Dict[str, Stream]:
        """Get a map from stream name to all streams."""

@@ -321,10 +321,10 @@ class ReplicationCommandHandler:
        """Get a list of streams that this instance replicates."""
        return self._streams_to_replicate

    def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand):
    def on_REPLICATE(self, conn: AbstractConnection, cmd: ReplicateCommand):
        self.send_positions_to_connection(conn)

    def send_positions_to_connection(self, conn: IReplicationConnection):
    def send_positions_to_connection(self, conn: AbstractConnection):
        """Send current position of all streams this process is source of to
        the connection.
        """

@@ -347,7 +347,7 @@ class ReplicationCommandHandler:
        )

    def on_USER_SYNC(
        self, conn: IReplicationConnection, cmd: UserSyncCommand
        self, conn: AbstractConnection, cmd: UserSyncCommand
    ) -> Optional[Awaitable[None]]:
        user_sync_counter.inc()

@@ -359,23 +359,21 @@ class ReplicationCommandHandler:
            return None

    def on_CLEAR_USER_SYNC(
        self, conn: IReplicationConnection, cmd: ClearUserSyncsCommand
        self, conn: AbstractConnection, cmd: ClearUserSyncsCommand
    ) -> Optional[Awaitable[None]]:
        if self._is_master:
            return self._presence_handler.update_external_syncs_clear(cmd.instance_id)
        else:
            return None

    def on_FEDERATION_ACK(
        self, conn: IReplicationConnection, cmd: FederationAckCommand
    ):
    def on_FEDERATION_ACK(self, conn: AbstractConnection, cmd: FederationAckCommand):
        federation_ack_counter.inc()

        if self._federation_sender:
            self._federation_sender.federation_ack(cmd.instance_name, cmd.token)

    def on_USER_IP(
        self, conn: IReplicationConnection, cmd: UserIpCommand
        self, conn: AbstractConnection, cmd: UserIpCommand
    ) -> Optional[Awaitable[None]]:
        user_ip_cache_counter.inc()

@@ -397,7 +395,7 @@ class ReplicationCommandHandler:
            assert self._server_notices_sender is not None
            await self._server_notices_sender.on_user_ip(cmd.user_id)

    def on_RDATA(self, conn: IReplicationConnection, cmd: RdataCommand):
    def on_RDATA(self, conn: AbstractConnection, cmd: RdataCommand):
        if cmd.instance_name == self._instance_name:
            # Ignore RDATA that are just our own echoes
            return

@@ -414,7 +412,7 @@ class ReplicationCommandHandler:
        self._add_command_to_stream_queue(conn, cmd)

    async def _process_rdata(
        self, stream_name: str, conn: IReplicationConnection, cmd: RdataCommand
        self, stream_name: str, conn: AbstractConnection, cmd: RdataCommand
    ) -> None:
        """Process an RDATA command

@@ -488,7 +486,7 @@ class ReplicationCommandHandler:
            stream_name, instance_name, token, rows
        )

    def on_POSITION(self, conn: IReplicationConnection, cmd: PositionCommand):
    def on_POSITION(self, conn: AbstractConnection, cmd: PositionCommand):
        if cmd.instance_name == self._instance_name:
            # Ignore POSITION that are just our own echoes
            return

@@ -498,7 +496,7 @@ class ReplicationCommandHandler:
        self._add_command_to_stream_queue(conn, cmd)

    async def _process_position(
        self, stream_name: str, conn: IReplicationConnection, cmd: PositionCommand
        self, stream_name: str, conn: AbstractConnection, cmd: PositionCommand
    ) -> None:
        """Process a POSITION command

@@ -555,9 +553,7 @@ class ReplicationCommandHandler:

        self._streams_by_connection.setdefault(conn, set()).add(stream_name)

    def on_REMOTE_SERVER_UP(
        self, conn: IReplicationConnection, cmd: RemoteServerUpCommand
    ):
    def on_REMOTE_SERVER_UP(self, conn: AbstractConnection, cmd: RemoteServerUpCommand):
        """Called when we get a new REMOTE_SERVER_UP command."""
        self._replication_data_handler.on_remote_server_up(cmd.data)

@@ -580,7 +576,7 @@ class ReplicationCommandHandler:
        # between two instances, but that is not currently supported).
        self.send_command(cmd, ignore_conn=conn)

    def new_connection(self, connection: IReplicationConnection):
    def new_connection(self, connection: AbstractConnection):
        """Called when we have a new connection."""
        self._connections.append(connection)

@@ -607,7 +603,7 @@ class ReplicationCommandHandler:
                UserSyncCommand(self._instance_id, user_id, True, now)
            )

    def lost_connection(self, connection: IReplicationConnection):
    def lost_connection(self, connection: AbstractConnection):
        """Called when a connection is closed/lost."""
        # we no longer need _streams_by_connection for this connection.
        streams = self._streams_by_connection.pop(connection, None)

@@ -628,7 +624,7 @@ class ReplicationCommandHandler:
        return bool(self._connections)

    def send_command(
        self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None
        self, cmd: Command, ignore_conn: Optional[AbstractConnection] = None
    ):
        """Send a command to all connected connections.

@@ -46,6 +46,7 @@ indicate which side is sending, these are *not* included on the wire::
    > ERROR server stopping
    * connection closed by server *
"""
import abc
import fcntl
import logging
import struct

@@ -53,10 +54,8 @@ from inspect import isawaitable
from typing import TYPE_CHECKING, List, Optional

from prometheus_client import Counter
from zope.interface import Interface, implementer

from twisted.internet import task
from twisted.internet.tcp import Connection
from twisted.protocols.basic import LineOnlyReceiver
from twisted.python.failure import Failure

@@ -122,14 +121,6 @@ class ConnectionStates:
    CLOSED = "closed"


class IReplicationConnection(Interface):
    """An interface for replication connections."""

    def send_command(cmd: Command):
        """Send the command down the connection"""


@implementer(IReplicationConnection)
class BaseReplicationStreamProtocol(LineOnlyReceiver):
    """Base replication protocol shared between client and server.

@@ -146,10 +137,6 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
    (if they send a `PING` command)
    """

    # The transport is going to be an ITCPTransport, but that doesn't have the
    # (un)registerProducer methods, those are only on the implementation.
    transport = None  # type: Connection

    delimiter = b"\n"

    # Valid commands we expect to receive

@@ -194,7 +181,6 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):

        connected_connections.append(self)  # Register connection for metrics

        assert self.transport is not None
        self.transport.registerProducer(self, True)  # For the *Producing callbacks

        self._send_pending_commands()

@@ -219,7 +205,6 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
                logger.info(
                    "[%s] Failed to close connection gracefully, aborting", self.id()
                )
                assert self.transport is not None
                self.transport.abortConnection()
        else:
            if now - self.last_sent_command >= PING_TIME:

@@ -309,7 +294,6 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
    def close(self):
        logger.warning("[%s] Closing connection", self.id())
        self.time_we_closed = self.clock.time_msec()
        assert self.transport is not None
        self.transport.loseConnection()
        self.on_connection_closed()

@@ -407,7 +391,6 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
    def connectionLost(self, reason):
        logger.info("[%s] Replication connection closed: %r", self.id(), reason)
        if isinstance(reason, Failure):
            assert reason.type is not None
            connection_close_counter.labels(reason.type.__name__).inc()
        else:
            connection_close_counter.labels(reason.__class__.__name__).inc()

@@ -512,6 +495,20 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
        self.send_command(ReplicateCommand())


class AbstractConnection(abc.ABC):
    """An interface for replication connections."""

    @abc.abstractmethod
    def send_command(self, cmd: Command):
        """Send the command down the connection"""
        pass


# This tells python that `BaseReplicationStreamProtocol` implements the
# interface.
AbstractConnection.register(BaseReplicationStreamProtocol)
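This hunk swaps a zope.interface declaration for an `abc.ABC` with virtual-subclass registration. A side-by-side sketch of the two mechanisms, using hypothetical `AbstractConn`/`IConn` names:

import abc

from zope.interface import Interface, implementer


class AbstractConn(abc.ABC):
    @abc.abstractmethod
    def send_command(self, cmd):
        """Send the command down the connection"""


class IConn(Interface):
    def send_command(cmd):
        """Send the command down the connection (interface methods take no self)"""


@implementer(IConn)
class Conn:
    def send_command(self, cmd):
        print("sending", cmd)


class LegacyConn:
    def send_command(self, cmd):
        print("sending", cmd)


# abc: register() makes isinstance() pass without any inheritance...
AbstractConn.register(LegacyConn)
assert isinstance(LegacyConn(), AbstractConn)

# ...while zope.interface checks the explicit declaration, not the shape.
assert IConn.providedBy(Conn())
assert not IConn.providedBy(LegacyConn())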


# The following simply registers metrics for the replication connections

pending_commands = LaterGauge(

@@ -19,11 +19,6 @@ from typing import TYPE_CHECKING, Generic, Optional, Type, TypeVar, cast

import attr
import txredisapi
from zope.interface import implementer

from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.interfaces import IAddress, IConnector
from twisted.python.failure import Failure

from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import (

@@ -37,7 +32,7 @@ from synapse.replication.tcp.commands import (
    parse_command_from_line,
)
from synapse.replication.tcp.protocol import (
    IReplicationConnection,
    AbstractConnection,
    tcp_inbound_commands_counter,
    tcp_outbound_commands_counter,
)

@@ -67,8 +62,7 @@ class ConstantProperty(Generic[T, V]):
        pass


@implementer(IReplicationConnection)
class RedisSubscriber(txredisapi.SubscriberProtocol):
class RedisSubscriber(txredisapi.SubscriberProtocol, AbstractConnection):
    """Connection to redis subscribed to replication stream.

    This class fulfils two functions:

@@ -77,7 +71,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
    connection, parsing *incoming* messages into replication commands, and passing them
    to `ReplicationCommandHandler`

    (b) it implements the IReplicationConnection API, where it sends *outgoing* commands
    (b) it implements the AbstractConnection API, where it sends *outgoing* commands
    onto outbound_redis_connection.

    Due to the vagaries of `txredisapi` we don't want to have a custom

@@ -259,37 +253,6 @@ class SynapseRedisFactory(txredisapi.RedisFactory):
        except Exception:
            logger.warning("Failed to send ping to a redis connection")

    # ReconnectingClientFactory has some logging (if you enable `self.noisy`), but
    # it's rubbish. We add our own here.

    def startedConnecting(self, connector: IConnector):
        logger.info(
            "Connecting to redis server %s", format_address(connector.getDestination())
        )
        super().startedConnecting(connector)

    def clientConnectionFailed(self, connector: IConnector, reason: Failure):
        logger.info(
            "Connection to redis server %s failed: %s",
            format_address(connector.getDestination()),
            reason.value,
        )
        super().clientConnectionFailed(connector, reason)

    def clientConnectionLost(self, connector: IConnector, reason: Failure):
        logger.info(
            "Connection to redis server %s lost: %s",
            format_address(connector.getDestination()),
            reason.value,
        )
        super().clientConnectionLost(connector, reason)


def format_address(address: IAddress) -> str:
    if isinstance(address, (IPv4Address, IPv6Address)):
        return "%s:%i" % (address.host, address.port)
    return str(address)
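For what the removed helper produced: `format_address` renders Twisted address objects as `host:port` for the log lines above. A tiny sketch against the real `twisted.internet.address` types:

from twisted.internet.address import IPv4Address

addr = IPv4Address("TCP", "10.0.0.7", 6379)
print("%s:%i" % (addr.host, addr.port))  # 10.0.0.7:6379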


class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
    """This is a reconnecting factory that connects to redis and immediately

@@ -365,6 +328,6 @@ def lazyConnection(
    factory.continueTrying = reconnect

    reactor = hs.get_reactor()
    reactor.connectTCP(host.encode(), port, factory, timeout=30, bindAddress=None)
    reactor.connectTCP(host, port, factory, 30)

    return factory.handler
Some files were not shown because too many files have changed in this diff.