Compare commits: v1.18.0...travis/gro (133 commits)
| SHA1 |
|---|
| b024acffea |
| acfb7c3b5d |
| 3c01724b33 |
| 5cf7c12995 |
| 408aef8276 |
| 2f4d60a5ba |
| 25e55d2598 |
| 8b6c176aee |
| 050e20e7ca |
| e04e465b4d |
| 8390e00c7f |
| 3234d5c305 |
| ea4e4d2f0b |
| ad6190c925 |
| ac77cdb64e |
| b069b78bb4 |
| e8861957d9 |
| dc22090a67 |
| 6b7ce1d332 |
| 894dae74fe |
| 7bdf9828d5 |
| bfd79c2988 |
| 93848f3c89 |
| 4550b77312 |
| a69ba6f457 |
| 091ca3910d |
| 53834bb9c4 |
| ff0e894656 |
| dd8f28bd3f |
| fbe930dad2 |
| 5ecc8b5825 |
| 5dd73d029e |
| d68e10f308 |
| a3a59bab7b |
| 9d1e4942ab |
| 6ba621d786 |
| 04faa0bfa9 |
| a0acdfa9e9 |
| fdb46b5442 |
| c066928915 |
| 61d8ff0d44 |
| 3c796e4159 |
| a1e9bb9eae |
| 8a3dac3c19 |
| e1af09dccb |
| 0304ad0c3d |
| a0f574f3c2 |
| db131b6b22 |
| 64e5bb0dc8 |
| 0f1afbe8dc |
| 0cb169900e |
| aa827b6ad7 |
| 39c3f68758 |
| fcbab08cbd |
| cdbb8e6d6e |
| 5c43c43240 |
| 1a3aabcf3f |
| cee6c6012e |
| 7f837959ea |
| f3fe6961b2 |
| 1048ed2afa |
| de6f892065 |
| 2f9fd5ab00 |
| 4e874ed593 |
| 7620912d84 |
| 4dd27e6d11 |
| 367e9e6e9e |
| bf33d5c457 |
| 2ffd6783c7 |
| fe6cfc80ec |
| d4a7829b12 |
| c36228c403 |
| 66f24449dd |
| 118a9eafb3 |
| dd11f575a2 |
| 079bc3c8e3 |
| a7bdf98d01 |
| 0a86850ba3 |
| 8b786db323 |
| 7cac9006d6 |
| 8ff2deda72 |
| 88a3ff12f0 |
| e19de43eb5 |
| 916cf2d439 |
| 481f76c7aa |
| 5d92a1428c |
| 6812509807 |
| 2a89ce8cd4 |
| b6c6fb7950 |
| 3b415e23a5 |
| db5970ac6d |
| e2f1cccc8a |
| 1678057b56 |
| d1008fe949 |
| 394be6a0e6 |
| faba873d4b |
| 9b3ab57acd |
| 18de00adb4 |
| cf42d0a60c |
| 79d991eff0 |
| 713d70d6c6 |
| e2a4ba6f9b |
| 60328ce9fb |
| 69158e554f |
| 8b04c4cd70 |
| 6d4b790021 |
| 0a7fb24716 |
| 606805bf06 |
| 3aa36b782c |
| c978f6c451 |
| 4cce8ef74e |
| b3a97d6dac |
| 3950ae51ef |
| a53e0160a2 |
| d90087cffa |
| 3a00bd1378 |
| f23c77389d |
| 8dff4a1242 |
| 2184f61fae |
| 3345c166a4 |
| e866e3b896 |
| 9725c59247 |
| 8a25332d94 |
| 2c1e1b153d |
| 8078dec3be |
| 3857de2194 |
| 349119a340 |
| aaf9ce72a0 |
| c4ce0da6fe |
| 68626ff8e9 |
| 8553f46498 |
| 5f65e62681 |
| 8144bc26a7 |
@@ -4,18 +4,16 @@ jobs:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py3

workflows:
  version: 2
.github/ISSUE_TEMPLATE/BUG_REPORT.md (vendored, 4 changes)
@@ -4,12 +4,12 @@ about: Create a report to help us improve

---

<!--

**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)

<!--

If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/

This is a bug report template. By following the instructions below and
CHANGES.md (88 changes)
@@ -1,3 +1,91 @@
For the next release
====================

Removal warning
---------------

Some older clients used a
[disallowed character](https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-register-email-requesttoken)
(`:`) in the `client_secret` parameter of various endpoints. The incorrect
behaviour was allowed for backwards compatibility, but is now being removed
from Synapse as most users have updated their client. Further context can be
found at [\#6766](https://github.com/matrix-org/synapse/issues/6766).


Synapse 1.19.0 (2020-08-17)
===========================

No significant changes since 1.19.0rc1.

Removal warning
---------------

As outlined in the [previous release](https://github.com/matrix-org/synapse/releases/tag/v1.18.0), we are no longer publishing Docker images with the `-py3` tag suffix. On top of that, we have also removed the `latest-py3` tag. Please see [the announcement in the upgrade notes for 1.18.0](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180).


Synapse 1.19.0rc1 (2020-08-13)
==============================

Features
--------

- Add option to allow server admins to join rooms which fail complexity checks. Contributed by @lugino-emeritus. ([\#7902](https://github.com/matrix-org/synapse/issues/7902))
- Add an option to purge room or not with delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel. ([\#7964](https://github.com/matrix-org/synapse/issues/7964))
- Add rate limiting to users joining rooms. ([\#8008](https://github.com/matrix-org/synapse/issues/8008))
- Add a `/health` endpoint to every configured HTTP listener that can be used as a health check endpoint by load balancers. ([\#8048](https://github.com/matrix-org/synapse/issues/8048))
- Allow login to be blocked based on the values of SAML attributes. ([\#8052](https://github.com/matrix-org/synapse/issues/8052))
- Allow guest access to the `GET /_matrix/client/r0/rooms/{room_id}/members` endpoint, according to MSC2689. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#7314](https://github.com/matrix-org/synapse/issues/7314))


Bugfixes
--------

- Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977))
- Fix a long-standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978))
- Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. ([\#7980](https://github.com/matrix-org/synapse/issues/7980))
- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
- Fix a long-standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999))
- Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012))


Updates to the Docker image
---------------------------

- We no longer publish Docker images with the `-py3` tag suffix, as [announced in the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180). ([\#8056](https://github.com/matrix-org/synapse/issues/8056))


Improved Documentation
----------------------

- Document how to set up a client .well-known file and fix several pieces of outdated documentation. ([\#7899](https://github.com/matrix-org/synapse/issues/7899))
- Improve workers docs. ([\#7990](https://github.com/matrix-org/synapse/issues/7990), [\#8000](https://github.com/matrix-org/synapse/issues/8000))
- Fix typo in `docs/workers.md`. ([\#7992](https://github.com/matrix-org/synapse/issues/7992))
- Add documentation for how to undo a room shutdown. ([\#7998](https://github.com/matrix-org/synapse/issues/7998), [\#8010](https://github.com/matrix-org/synapse/issues/8010))


Internal Changes
----------------

- Reduce the amount of whitespace in JSON stored and sent in responses. Contributed by David Vo. ([\#7372](https://github.com/matrix-org/synapse/issues/7372))
- Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0. ([\#7936](https://github.com/matrix-org/synapse/issues/7936), [\#7979](https://github.com/matrix-org/synapse/issues/7979))
- Convert various parts of the codebase to async/await. ([\#7947](https://github.com/matrix-org/synapse/issues/7947), [\#7948](https://github.com/matrix-org/synapse/issues/7948), [\#7949](https://github.com/matrix-org/synapse/issues/7949), [\#7951](https://github.com/matrix-org/synapse/issues/7951), [\#7963](https://github.com/matrix-org/synapse/issues/7963), [\#7973](https://github.com/matrix-org/synapse/issues/7973), [\#7975](https://github.com/matrix-org/synapse/issues/7975), [\#7976](https://github.com/matrix-org/synapse/issues/7976), [\#7981](https://github.com/matrix-org/synapse/issues/7981), [\#7987](https://github.com/matrix-org/synapse/issues/7987), [\#7989](https://github.com/matrix-org/synapse/issues/7989), [\#8003](https://github.com/matrix-org/synapse/issues/8003), [\#8014](https://github.com/matrix-org/synapse/issues/8014), [\#8016](https://github.com/matrix-org/synapse/issues/8016), [\#8027](https://github.com/matrix-org/synapse/issues/8027), [\#8031](https://github.com/matrix-org/synapse/issues/8031), [\#8032](https://github.com/matrix-org/synapse/issues/8032), [\#8035](https://github.com/matrix-org/synapse/issues/8035), [\#8042](https://github.com/matrix-org/synapse/issues/8042), [\#8044](https://github.com/matrix-org/synapse/issues/8044), [\#8045](https://github.com/matrix-org/synapse/issues/8045), [\#8061](https://github.com/matrix-org/synapse/issues/8061), [\#8062](https://github.com/matrix-org/synapse/issues/8062), [\#8063](https://github.com/matrix-org/synapse/issues/8063), [\#8066](https://github.com/matrix-org/synapse/issues/8066), [\#8069](https://github.com/matrix-org/synapse/issues/8069), [\#8070](https://github.com/matrix-org/synapse/issues/8070))
- Move some database-related log lines from the default logger to the database/transaction loggers. ([\#7952](https://github.com/matrix-org/synapse/issues/7952))
- Add a script to detect source code files using non-unix line terminators. ([\#7965](https://github.com/matrix-org/synapse/issues/7965), [\#7970](https://github.com/matrix-org/synapse/issues/7970))
- Log the SAML session ID during creation. ([\#7971](https://github.com/matrix-org/synapse/issues/7971))
- Implement new experimental push rules for some users. ([\#7997](https://github.com/matrix-org/synapse/issues/7997))
- Remove redundant and unreliable signature check for v1 Identity Service lookup responses. ([\#8001](https://github.com/matrix-org/synapse/issues/8001))
- Improve the performance of the register endpoint. ([\#8009](https://github.com/matrix-org/synapse/issues/8009))
- Reduce less useful output in the newsfragment CI step. Add a link to the changelog section of the contributing guide on error. ([\#8024](https://github.com/matrix-org/synapse/issues/8024))
- Rename storage layer objects to be more sensible. ([\#8033](https://github.com/matrix-org/synapse/issues/8033))
- Change the default log config to reduce disk I/O and storage for new servers. ([\#8040](https://github.com/matrix-org/synapse/issues/8040))
- Add an assertion on `prev_events` in `create_new_client_event`. ([\#8041](https://github.com/matrix-org/synapse/issues/8041))
- Add a comment to `ServerContextFactory` about the use of `SSLv23_METHOD`. ([\#8043](https://github.com/matrix-org/synapse/issues/8043))
- Log `OPTIONS` requests at `DEBUG` rather than `INFO` level to reduce amount logged at `INFO`. ([\#8049](https://github.com/matrix-org/synapse/issues/8049))
- Reduce amount of outbound request logging at `INFO` level. ([\#8050](https://github.com/matrix-org/synapse/issues/8050))
- It is no longer necessary to explicitly define `filters` in the logging configuration. (Continuing to do so is redundant but harmless.) ([\#8051](https://github.com/matrix-org/synapse/issues/8051))
- Add and improve type hints. ([\#8058](https://github.com/matrix-org/synapse/issues/8058), [\#8064](https://github.com/matrix-org/synapse/issues/8064), [\#8060](https://github.com/matrix-org/synapse/issues/8060), [\#8067](https://github.com/matrix-org/synapse/issues/8067))


Synapse 1.18.0 (2020-07-30)
===========================
INSTALL.md (109 changes)
@@ -1,10 +1,12 @@
- [Choosing your server name](#choosing-your-server-name)
- [Picking a database engine](#picking-a-database-engine)
- [Installing Synapse](#installing-synapse)
  - [Installing from source](#installing-from-source)
    - [Platform-Specific Instructions](#platform-specific-instructions)
  - [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
  - [TLS certificates](#tls-certificates)
  - [Client Well-Known URI](#client-well-known-uri)
  - [Email](#email)
  - [Registering a user](#registering-a-user)
  - [Setting up a TURN server](#setting-up-a-turn-server)
@@ -27,6 +29,25 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).

# Picking a database engine

Synapse offers two database engines:
 * [PostgreSQL](https://www.postgresql.org)
 * [SQLite](https://sqlite.org/)

Almost all installations should opt to use PostgreSQL. Advantages include:

* significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
* allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL, please see
[docs/postgres.md](docs/postgres.md)

By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.

# Installing Synapse

## Installing from source
@@ -234,9 +255,9 @@ for a number of platforms.

There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further information on
this including configuration options is available in the README on
hub.docker.com.
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
@@ -244,7 +265,8 @@ https://hub.docker.com/r/avhost/docker-matrix/tags/

Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
https://github.com/spantaleev/matrix-docker-ansible-deploy
@@ -277,22 +299,27 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

#### Downstream Debian/Ubuntu packages
#### Downstream Debian packages

For `buster` and `sid`, Synapse is available in the Debian repositories and
it should be possible to install it with simply:
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.

If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:

```
sudo apt install matrix-synapse
```

There is also a version of `matrix-synapse` in `stretch-backports`. Please see
the [Debian documentation on
backports](https://backports.debian.org/Instructions/) for information on how
to use them.
#### Downstream Ubuntu packages

We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

### Fedora
@@ -419,6 +446,60 @@ so, you will need to edit `homeserver.yaml`, as follows:
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).

## Client Well-Known URI

Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.

The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.

```
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  }
}
```

It can optionally contain identity server information as well.

```
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  },
  "m.identity_server": {
    "base_url": "https://<identity.example.com>"
  }
}
```

To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.

In nginx this would be something like:
```
location /.well-known/matrix/client {
    return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
    add_header Content-Type application/json;
    add_header Access-Control-Allow-Origin *;
}
```

You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.

```
public_baseurl: "https://<matrix.example.com>"
```
@@ -437,7 +518,7 @@ email will be disabled.

## Registering a user

The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively you can do so from the command line if you have installed via pip, as shown below.
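As a concrete sketch of that command-line route (hedged: `register_new_matrix_user` ships with the pip package, but the config path, the listener URL, and the presence of a `registration_shared_secret` in `homeserver.yaml` are assumptions about a typical local setup):

```
# Interactively create a user against a locally listening homeserver;
# the shared registration secret is read from the given config file.
register_new_matrix_user -c homeserver.yaml http://localhost:8008
```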
README.rst (43 changes)
@@ -45,7 +45,7 @@ which handle:
- Eventually-consistent cryptographically secure synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption[1]
  end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
@@ -82,9 +82,6 @@ at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the

Thanks for using Matrix!

[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.


Support
=======
@@ -115,12 +112,11 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.

An easy way to get started is to login or register via Riot at
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
(Leave the identity server as the default - see `Identity servers`_.)
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
@@ -137,7 +133,7 @@ it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
user via a Matrix client.

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
@@ -183,30 +179,6 @@ versions of synapse.

.. _UPGRADE.rst: UPGRADE.rst


Using PostgreSQL
================

Synapse offers two database engines:
 * `PostgreSQL <https://www.postgresql.org>`_
 * `SQLite <https://sqlite.org/>`_

Almost all installations should opt to use PostgreSQL. Advantages include:

* significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
  pointing at the same DB master, as well as enabling DB replication in
  synapse itself.

For information on how to install and use PostgreSQL, please see
`docs/postgres.md <docs/postgres.md>`_.

By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.

.. _reverse-proxy:

Using a reverse proxy with Synapse
@@ -255,10 +227,9 @@ email address.
Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Riot.

A manual password reset can be done via direct database access as follows.
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
or by directly editing the database as shown below.

First calculate the hash of the new password::
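The hash step itself is cut off in this view; as a hedged sketch of the admin-API alternative mentioned above (the endpoint shape follows the linked `user_admin_api.rst#reset-password` document; the listener port, access token and user ID are placeholders):

```
# Reset a user's password using an admin user's access token (placeholders).
curl -X POST \
    -H "Authorization: Bearer <admin_access_token>" \
    -d '{"new_password": "<new_password>"}' \
    "http://localhost:8008/_synapse/admin/v1/reset_password/@alice:example.com"
```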
changelog.d/7864.bugfix (new file): Fix a memory leak by limiting the length of time that messages will be queued for a remote server that has been unreachable.

changelog.d/8013.feature (new file): Iteratively encode JSON to avoid blocking the reactor.

changelog.d/8037.feature (new file): Use the default template file when its equivalent is not found in a custom template directory.

changelog.d/8072.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8074.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8075.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8076.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8081.bugfix (new file): Fix `Re-starting finished log context PUT-nnnn` warning when event persistence failed.

changelog.d/8085.misc (new file): Remove some unused database functions.

changelog.d/8087.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8090.misc (new file): Add type hints to `synapse.handlers.room`.

changelog.d/8092.feature (new file): Add support for shadow-banning users (ignoring any message send requests).

changelog.d/8093.misc (new file): Return the previous stream token if a non-member event is a duplicate.

changelog.d/8100.misc (new file): Convert various parts of the codebase to async/await.

changelog.d/8101.bugfix (new file): Synapse now correctly enforces the valid characters in the `client_secret` parameter used in various endpoints.

changelog.d/8107.feature (new file): Use the default template file when its equivalent is not found in a custom template directory.

changelog.d/8111.doc (new file): Link to matrix-synapse-rest-password-provider in the password provider documentation.

changelog.d/8112.misc (new file): Return the previous stream token if a non-member event is a duplicate.
@@ -609,13 +609,15 @@ class SynapseCmd(cmd.Cmd):

    @defer.inlineCallbacks
    def _do_event_stream(self, timeout):
        res = yield self.http_client.get_json(
            self._url() + "/events",
            {
                "access_token": self._tok(),
                "timeout": str(timeout),
                "from": self.event_stream_token,
            },
        res = yield defer.ensureDeferred(
            self.http_client.get_json(
                self._url() + "/events",
                {
                    "access_token": self._tok(),
                    "timeout": str(timeout),
                    "from": self.event_stream_token,
                },
            )
        )
        print(json.dumps(res, indent=4))
debian/changelog (vendored, 10 changes)
@@ -1,3 +1,13 @@
matrix-synapse-py3 (1.19.0) stable; urgency=medium

  [ Synapse Packaging team ]
  * New synapse release 1.19.0.

  [ Aaron Raimist ]
  * Fix outdated documentation for SYNAPSE_CACHE_FACTOR

 -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Aug 2020 14:06:42 +0100

matrix-synapse-py3 (1.18.0) stable; urgency=medium

  * New synapse release 1.18.0.
debian/matrix-synapse.default (vendored, 2 changes)
@@ -1,2 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=1 (default)
# SYNAPSE_CACHE_FACTOR=0.5 (default)
debian/synctl.ronn (vendored, 27 changes)
@@ -46,19 +46,20 @@ Configuration file may be generated as follows:
## ENVIRONMENT

  * `SYNAPSE_CACHE_FACTOR`:
    Synapse's architecture is quite RAM hungry currently - a lot of
    recent room data and metadata is deliberately cached in RAM in
    order to speed up common requests. This will be improved in
    future, but for now the easiest way to either reduce the RAM usage
    (at the risk of slowing things down) is to set the
    SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
    SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
    resident memory - this is what we currently run the matrix.org
    on. The default setting is currently 0.1, which is probably around
    a ~700MB footprint. You can dial it down further to 0.02 if
    desired, which targets roughly ~512MB. Conversely you can dial it
    up if you need performance for lots of users and have a box with a
    lot of RAM.
    Synapse's architecture is quite RAM hungry currently - we deliberately
    cache a lot of recent room data and metadata in RAM in order to speed up
    common requests. We'll improve this in the future, but for now the easiest
    way to either reduce the RAM usage (at the risk of slowing things down)
    is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
    variable. The default is 0.5, which can be decreased to reduce RAM usage
    in memory constrained environments, or increased if performance starts to
    degrade.

    However, degraded performance due to a low cache factor, common on
    machines with slow disks, often leads to explosions in memory use due to
    backlogged requests. In this case, reducing the cache factor will make
    things worse. Instead, try increasing it drastically. 2.0 is a good
    starting value.
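For the Debian package, a minimal sketch of acting on that advice (hedged: assumes the variable is read from `/etc/default/matrix-synapse`, per the `debian/matrix-synapse.default` hunk above, and that the service is managed by systemd):

```
# /etc/default/matrix-synapse
SYNAPSE_CACHE_FACTOR=2.0

# then restart Synapse so the new factor takes effect:
# sudo systemctl restart matrix-synapse
```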

## COPYRIGHT
@@ -4,16 +4,10 @@ formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse.storage.SQL:
@@ -10,5 +10,16 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.

# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.

################################################################################
@@ -369,7 +369,9 @@ to the new room will have power level `-10` by default, and thus be unable to sp
If `block` is `True` it prevents new joins to the old room.

This API will remove all trace of the old room from your database after removing
all local users.
all local users. If `purge` is `true` (the default), all traces of the old room will
be removed from your database after removing all local users. If you do not want
this to happen, set `purge` to `false`.
Depending on the amount of history being purged a call to the API may take
several minutes or longer.
@@ -388,7 +390,8 @@ with a body of:
    "new_room_user_id": "@someuser:example.com",
    "room_name": "Content Violation Notification",
    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
    "block": true
    "block": true,
    "purge": true
}
```
@@ -430,8 +433,10 @@ The following JSON body parameters are available:
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down. Defaults to `Sharing illegal content on this server
  is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing future attempts to
  join the room. Defaults to `false`.
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
  future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
  Defaults to `true`.

The JSON body must not be empty. The body must be at least `{}`.
@@ -33,7 +33,7 @@ You will need to authenticate with an access token for an admin user.
* `message` - Optional. A string containing the first message that will be sent as
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down.

If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."
@@ -72,3 +72,30 @@ Response:
    "new_room_id": "!newroomid:example.com",
},
```

## Undoing room shutdowns

*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.

First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:

* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.

With all that being said, if you still want to try and recover the room:

1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
   * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.

You will have to manually handle, if you so choose, the following:

* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.
@@ -27,7 +27,7 @@
different thread to Synapse. This can make it more resilient to
heavy load meaning metrics cannot be retrieved, and can be exposed
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/`.
over HTTP only, and will be available at `/_synapse/metrics`.

Add a new listener to homeserver.yaml:
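The listener snippet itself is cut off in this view; a minimal sketch of what such a metrics listener could look like in `homeserver.yaml` (the port and bind address are illustrative assumptions, not part of this diff):

```yaml
listeners:
  # A dedicated metrics listener, served over plain HTTP on localhost only.
  - port: 9000
    type: metrics
    bind_addresses: ['127.0.0.1']
```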
@@ -14,6 +14,7 @@ password auth provider module implementations:

* [matrix-synapse-ldap3](https://github.com/matrix-org/matrix-synapse-ldap3/)
* [matrix-synapse-shared-secret-auth](https://github.com/devture/matrix-synapse-shared-secret-auth)
* [matrix-synapse-rest-password-provider](https://github.com/ma1uta/matrix-synapse-rest-password-provider)

## Required methods
@@ -188,6 +188,9 @@ to do step 2.

It is safe to at any time kill the port script and restart it.

Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.

### Using the port script

Firstly, shut down the currently running synapse server and copy its
@@ -139,3 +139,10 @@ client IP addresses are recorded correctly.
Having done so, you can then use `https://matrix.example.com` (instead
of `https://matrix.example.com:8448`) as the "Custom server" when
connecting to Synapse from a client.

## Health check endpoint

Synapse exposes a health check endpoint for use by reverse proxies.
Each configured HTTP listener has a `/health` endpoint which always returns
200 OK (and doesn't get logged).
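A quick way to exercise the new endpoint from the reverse-proxy host (hedged: assumes a listener on port 8008):

```
curl -i http://localhost:8008/health
# Expect an HTTP/1.1 200 OK status line; any 2xx means the listener is up.
```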
@@ -10,6 +10,17 @@
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.

# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.

################################################################################

# Configuration file for Synapse.
@@ -314,6 +325,10 @@ limit_remote_rooms:
  #
  #complexity_error: "This room is too complex."

  # Allow server admins to join complex rooms. Default is false.
  #
  #admins_can_join: true

# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -731,6 +746,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
#   - one for ratelimiting redactions by room admins. If this is not explicitly
#     set then it uses the same ratelimiting as per rc_message. This is useful
#     to allow room admins to deal with abuse quickly.
#   - two for ratelimiting number of rooms a user can join, "local" for when
#     users are joining rooms the server is already in (this is cheap) vs
#     "remote" for when users are trying to join rooms not on the server (which
#     can be more expensive)
#
# The defaults are as shown below.
#
@@ -756,6 +775,14 @@ log_config: "CONFDIR/SERVERNAME.log.config"
#rc_admin_redaction:
#  per_second: 1
#  burst_count: 50
#
#rc_joins:
#  local:
#    per_second: 0.1
#    burst_count: 3
#  remote:
#    per_second: 0.01
#    burst_count: 3


# Ratelimiting settings for incoming federation
@@ -1145,24 +1172,6 @@ account_validity:
#
#default_identity_server: https://matrix.org

# The list of identity servers trusted to verify third party
# identifiers by this server.
#
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
# background migration script, informing itself that the identity server all of its
# 3PIDs have been bound to is likely one of the below.
#
# As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
# it is now solely used for the purposes of the background migration script, and can be
# removed once it has run.
#trusted_third_party_id_servers:
#  - matrix.org
#  - vector.im

# Handle threepid (email/phone etc) registration and password resets through a set of
# *trusted* identity servers. Note that this allows the configured identity server to
# reset passwords for accounts!
@@ -1568,6 +1577,17 @@ saml2_config:
  #
  #grandfathered_mxid_source_attribute: upn

  # It is possible to configure Synapse to only allow logins if SAML attributes
  # match particular values. The requirements can be listed under
  # `attribute_requirements` as shown below. All of the listed attributes must
  # match for the login to be permitted.
  #
  #attribute_requirements:
  #  - attribute: userGroup
  #    value: "staff"
  #  - attribute: department
  #    value: "sales"

# Directory in which Synapse will try to find the template files below.
# If not set, default templates from within the Synapse package will be used.
#
@@ -1982,9 +2002,7 @@ email:
  # Directory in which Synapse will try to find the template files below.
  # If not set, default templates from within the Synapse package will be used.
  #
  # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
  # If you *do* uncomment it, you will need to make sure that all the templates
  # below are in the directory.
  # Do not uncomment this setting unless you want to customise the templates.
  #
  # Synapse will look for the following templates in this directory:
  #
@@ -11,24 +11,33 @@ formatters:
precise:
  format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  file:
    class: logging.handlers.RotatingFileHandler
    class: logging.handlers.TimedRotatingFileHandler
    formatter: precise
    filename: /var/log/matrix-synapse/homeserver.log
    maxBytes: 104857600
    backupCount: 10
    filters: [context]
    when: midnight
    backupCount: 3  # Does not include the current log file.
    encoding: utf8

  # Default to buffering writes to log file for efficiency. This means that
  # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
  # logs will still be flushed immediately.
  buffer:
    class: logging.handlers.MemoryHandler
    target: file
    # The capacity is the number of log lines that are buffered before
    # being written to disk. Increasing this will lead to better
    # performance, at the expense of it taking longer for log lines to
    # be written to disk.
    capacity: 10
    flushLevel: 30  # Flush for WARNING logs as well

  # A handler that writes logs to stderr. Unused by default, but can be used
  # instead of "buffer" and "file" in the logger handlers.
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse.storage.SQL:
@@ -36,8 +45,23 @@ loggers:
    # information such as access tokens.
    level: INFO

  twisted:
    # We send the twisted logging directly to the file handler,
    # to work around https://github.com/matrix-org/synapse/issues/3471
    # when using "buffer" logger. Use "console" to log to stderr instead.
    handlers: [file]
    propagate: false

root:
  level: INFO
  handlers: [file, console]

  # Write logs to the `buffer` handler, which will buffer them together in memory,
  # then write them to a file.
  #
  # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
  # also need to update the configuration for the `twisted` logger above, in
  # this case.)
  #
  handlers: [buffer]

disable_existing_loggers: false
@@ -1,7 +1,7 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1

worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093

worker_listeners:
@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql)
solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.
docs/workers.md (101 changes)
@@ -1,10 +1,10 @@
# Scaling synapse via workers

For small instances it is recommended to run Synapse in monolith mode (the
default). For larger instances where performance is a concern it can be helpful
to split out functionality into multiple separate python processes. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.
For small instances it is recommended to run Synapse in the default monolith mode.
For larger instances where performance is a concern it can be helpful to split
out functionality into multiple separate python processes. These processes are
called 'workers', and are (eventually) intended to scale horizontally
independently.

Synapse's worker support is under active development and subject to change as
we attempt to rapidly scale ever larger Synapse instances. However we are
@@ -23,29 +23,30 @@ The processes communicate with each other via a Synapse-specific protocol called
feeds streams of newly written data between processes so they can be kept in
sync with the database state.

Additionally, processes may make HTTP requests to each other. Typically this is
used for operations which need to wait for a reply - such as sending an event.
When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.

As of Synapse v1.13.0, it is possible to configure Synapse to send replication
via a [Redis pub/sub channel](https://redis.io/topics/pubsub), and is now the
recommended way of configuring replication. This is an alternative to the old
direct TCP connections to the main process: rather than all the workers
connecting to the main process, all the workers and the main process connect to
Redis, which relays replication commands between processes. This can give a
significant cpu saving on the main process and will be a prerequisite for
upcoming performance improvements.
Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which is deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
cpu saving on the main process and will be a prerequisite for upcoming
performance improvements.

(See the [Architectural diagram](#architectural-diagram) section at the end for
a visualisation of what this looks like)
See the [Architectural diagram](#architectural-diagram) section at the end for
a visualisation of what this looks like.


## Setting up workers

A Redis server is required to manage the communication between the processes.
(The older direct TCP connections are now deprecated.) The Redis server
should be installed following the normal procedure for your distribution (e.g.
`apt install redis-server` on Debian). It is safe to use an existing Redis
deployment if you have one.
The Redis server should be installed following the normal procedure for your
distribution (e.g. `apt install redis-server` on Debian). It is safe to use an
existing Redis deployment if you have one.

Once installed, check that Redis is running and accessible from the host running
Synapse, for example by executing `echo PING | nc -q1 localhost 6379` and seeing
@@ -65,18 +66,31 @@ https://hub.docker.com/r/matrixdotorg/synapse/.

To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. See [reverse_proxy.md](reverse_proxy.md)
for information on setting up a reverse proxy.
the correct worker, or to the main synapse instance. See
[reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
proxy.

To enable workers you should create a configuration file for each worker
process. Each worker configuration file inherits the configuration of the shared
homeserver configuration file. You can then override configuration specific to
that worker, e.g. the HTTP listener that it provides (if any); logging
configuration; etc. You should minimise the number of overrides though to
maintain a usable config.
When using workers, each worker process has its own configuration file which
contains settings specific to that worker, such as the HTTP listener that it
provides (if any), logging configuration, etc.

Normally, the worker processes are configured to read from a shared
configuration file as well as the worker-specific configuration files. This
makes it easier to keep common configuration settings synchronised across all
the processes.

The main process is somewhat special in this respect: it does not normally
need its own configuration file and can take all of its configuration from the
shared configuration file.


### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. For example:

Next you need to add both a HTTP replication listener and redis config to the
shared Synapse configuration file (`homeserver.yaml`). For example:

```yaml
# extend the existing `listeners` section. This defines the ports that the
```
@@ -98,6 +112,9 @@ See the sample config for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
public internet; it has no authentication and is unencrypted.


### Worker configuration

In the config file for each worker, you must specify the type of worker
application (`worker_app`), and you should specify a unique name for the worker
(`worker_name`). The currently available worker applications are listed below.
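For illustration, a minimal worker file in that shape might look like the following sketch (the worker name, port, resources and log-config path are placeholders; the keys mirror the `federation_reader` example earlier in this diff):

```yaml
worker_app: synapse.app.generic_worker
worker_name: worker1

# Connect back to the main process for replication.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# HTTP listener this worker provides (if any).
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/worker1_log_config.yaml
```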
@@ -136,6 +153,9 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).


### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either
`synctl` or your distribution's preferred service manager such as `systemd`. We
recommend the use of `systemd` where available: for information on setting up
@@ -278,7 +298,7 @@ instance_map:
    host: localhost
    port: 8034

streams_writers:
stream_writers:
  events: event_persister1
```
@@ -398,6 +418,23 @@ all these to be folded into the `generic_worker` app and to use config to define
which processes handle the various processing such as push notifications.


## Migration from old config

There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible and so no changes to the config are required, however
server admins are encouraged to plan to migrate to Redis as the old style direct
TCP replication config is deprecated.

To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.
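Concretely, the Redis side of that migration is a small stanza in the shared `homeserver.yaml` (a sketch: `enabled` is the documented switch, and host/port only need setting if Redis is not on localhost:6379):

```yaml
redis:
  enabled: true
  # host: localhost
  # port: 6379
```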

To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where workers are started (e.g.
in systemd service files, but not required for synctl).


## Architectural diagram

The following shows an example setup using Redis and a reverse proxy:
mypy.ini (3 changes)
@@ -81,3 +81,6 @@ ignore_missing_imports = True

[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True

[mypy-nacl.*]
ignore_missing_imports = True
@@ -3,6 +3,8 @@
# A script which checks that an appropriate news file has been added on this
# branch.

echo -e "+++ \033[32mChecking newsfragment\033[m"

set -e

# make sure that origin/develop is up to date
@@ -16,6 +18,8 @@ pr="$BUILDKITE_PULL_REQUEST"
if ! git diff --quiet FETCH_HEAD... -- debian; then
    if git diff --quiet FETCH_HEAD... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
        echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
        exit 1
    fi
fi
@@ -26,7 +30,12 @@ if ! git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
    exit 0
fi

tox -qe check-newsfragment
# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"

# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
tox -qe check-newsfragment || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)

echo
echo "--------------------------"
@@ -38,6 +47,7 @@ for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
    lastchar=`tr -d '\n' < $f | tail -c 1`
    if [ $lastchar != '.' -a $lastchar != '!' ]; then
        echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
        echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
        exit 1
    fi

@@ -47,5 +57,6 @@ done

if [[ -n "$pr" && "$matched" -eq 0 ]]; then
    echo -e "\e[31mERROR: Did not find a news fragment with the right number: expected changelog.d/$pr.*.\e[39m" >&2
    echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2
    exit 1
fi
34 scripts-dev/check_line_terminators.sh Executable file
@@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script checks that all repository files (excluding those in the .git
# directory) use unix line terminators.
#
# Usage:
#
#     ./check_line_terminators.sh
#
# The script will emit exit code 1 if any files that do not use unix line
# terminators are found, 0 otherwise.

# cd to the root of the repository
cd `dirname $0`/..

# Find and print files with non-unix line terminators
if find . -path './.git/*' -prune -o -type f -print0 | xargs -0 grep -I -l $'\r$'; then
    echo -e '\e[31mERROR: found files with CRLF line endings. See above.\e[39m'
    exit 1
fi
@@ -40,7 +40,7 @@ class MockHomeserver(HomeServer):
            config.server_name, reactor=reactor, config=config, **kwargs
        )

        self.version_string = "Synapse/"+get_version_string(synapse)
        self.version_string = "Synapse/" + get_version_string(synapse)


if __name__ == "__main__":
@@ -86,7 +86,7 @@ if __name__ == "__main__":
    store = hs.get_datastore()

    async def run_background_updates():
        await store.db.updates.run_background_updates(sleep=False)
        await store.db_pool.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()
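The rest of this script follows the same mechanical rename: the `Database` class becomes `DatabasePool` and the store's `.db` attribute becomes `.db_pool`, with the method calls otherwise unchanged. A sketch of the new convention (the key values here are illustrative):

```python
async def lookup_port_row(store):
    # Same helper as before the rename, now reached via `db_pool`.
    return await store.db_pool.simple_select_one(
        table="port_from_sqlite3",
        keyvalues={"table_name": "events"},  # illustrative table name
        retcols=("forward_rowid", "backward_rowid"),
        allow_none=True,
    )
```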
@@ -35,31 +35,29 @@ from synapse.logging.context import (
    make_deferred_yieldable,
    run_in_background,
)
from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.data_stores.main.deviceinbox import (
    DeviceInboxBackgroundUpdateStore,
)
from synapse.storage.data_stores.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.data_stores.main.events_bg_updates import (
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.events_bg_updates import (
    EventsBackgroundUpdatesStore,
)
from synapse.storage.data_stores.main.media_repository import (
from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.data_stores.main.registration import (
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
)
from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
from synapse.storage.data_stores.main.state import MainStateBackgroundUpdateStore
from synapse.storage.data_stores.main.stats import StatsStore
from synapse.storage.data_stores.main.user_directory import (
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.user_directory import (
    UserDirectoryBackgroundUpdateStore,
)
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database, make_conn
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.util import Clock
@@ -175,14 +173,14 @@ class Store(
    StatsStore,
):
    def execute(self, f, *args, **kwargs):
        return self.db.runInteraction(f.__name__, f, *args, **kwargs)
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

    def execute_sql(self, sql, *args):
        def r(txn):
            txn.execute(sql, args)
            return txn.fetchall()

        return self.db.runInteraction("execute_sql", r)
        return self.db_pool.runInteraction("execute_sql", r)

    def insert_many_txn(self, txn, table, headers, rows):
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
@@ -227,7 +225,7 @@ class Porter(object):
    async def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            row = await self.postgres_store.db.simple_select_one(
            row = await self.postgres_store.db_pool.simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcols=("forward_rowid", "backward_rowid"),
@@ -244,7 +242,7 @@ class Porter(object):
                ) = await self._setup_sent_transactions()
                backward_chunk = 0
            else:
                await self.postgres_store.db.simple_insert(
                await self.postgres_store.db_pool.simple_insert(
                    table="port_from_sqlite3",
                    values={
                        "table_name": table,
@@ -274,7 +272,7 @@ class Porter(object):

            await self.postgres_store.execute(delete_all)

            await self.postgres_store.db.simple_insert(
            await self.postgres_store.db_pool.simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
            )
@@ -318,7 +316,7 @@ class Porter(object):
        if table == "user_directory_stream_pos":
            # We need to make sure there is a single row, `(X, null)`, as that is
            # what synapse expects to be there.
            await self.postgres_store.db.simple_insert(
            await self.postgres_store.db_pool.simple_insert(
                table=table, values={"stream_id": None}
            )
            self.progress.update(table, table_size)  # Mark table as done
@@ -359,7 +357,7 @@ class Porter(object):

            return headers, forward_rows, backward_rows

        headers, frows, brows = await self.sqlite_store.db.runInteraction(
        headers, frows, brows = await self.sqlite_store.db_pool.runInteraction(
            "select", r
        )

@@ -375,7 +373,7 @@ class Porter(object):
            def insert(txn):
                self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)

                self.postgres_store.db.simple_update_one_txn(
                self.postgres_store.db_pool.simple_update_one_txn(
                    txn,
                    table="port_from_sqlite3",
                    keyvalues={"table_name": table},
@@ -413,7 +411,7 @@ class Porter(object):

            return headers, rows

        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r)

        if rows:
            forward_chunk = rows[-1][0] + 1
@@ -451,7 +449,7 @@ class Porter(object):
                    ],
                )

                self.postgres_store.db.simple_update_one_txn(
                self.postgres_store.db_pool.simple_update_one_txn(
                    txn,
                    table="port_from_sqlite3",
                    keyvalues={"table_name": "event_search"},
@@ -494,7 +492,7 @@ class Porter(object):
            db_conn, allow_outdated_version=allow_outdated_version
        )
        prepare_database(db_conn, engine, config=self.hs_config)
        store = Store(Database(hs, db_config, engine), db_conn, hs)
        store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)
        db_conn.commit()

        return store
@@ -502,7 +500,7 @@ class Porter(object):
    async def run_background_updates_on_postgres(self):
        # Manually apply all background updates on the PostgreSQL database.
        postgres_ready = (
            await self.postgres_store.db.updates.has_completed_background_updates()
            await self.postgres_store.db_pool.updates.has_completed_background_updates()
        )

        if not postgres_ready:
@@ -511,9 +509,9 @@ class Porter(object):
            self.progress.set_state("Running background updates on PostgreSQL")

        while not postgres_ready:
            await self.postgres_store.db.updates.do_next_background_update(100)
            await self.postgres_store.db_pool.updates.do_next_background_update(100)
            postgres_ready = await (
                self.postgres_store.db.updates.has_completed_background_updates()
                self.postgres_store.db_pool.updates.has_completed_background_updates()
            )

    async def run(self):
@@ -534,7 +532,7 @@ class Porter(object):

            # Check if all background updates are done, abort if not.
            updates_complete = (
                await self.sqlite_store.db.updates.has_completed_background_updates()
                await self.sqlite_store.db_pool.updates.has_completed_background_updates()
            )
            if not updates_complete:
                end_error = (
@@ -576,22 +574,24 @@ class Porter(object):
            )

            try:
                await self.postgres_store.db.runInteraction("alter_table", alter_table)
                await self.postgres_store.db_pool.runInteraction(
                    "alter_table", alter_table
                )
            except Exception:
                # On Error Resume Next
                pass

            await self.postgres_store.db.runInteraction(
            await self.postgres_store.db_pool.runInteraction(
                "create_port_table", create_port_table
            )

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = await self.sqlite_store.db.simple_select_onecol(
            sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol(
                table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
            )

            postgres_tables = await self.postgres_store.db.simple_select_onecol(
            postgres_tables = await self.postgres_store.db_pool.simple_select_onecol(
                table="information_schema.tables",
                keyvalues={},
                retcol="distinct table_name",
@@ -692,7 +692,7 @@ class Porter(object):

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
        headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r)

        rows = self._convert_rows("sent_transactions", headers, rows)

@@ -725,7 +725,7 @@ class Porter(object):
        next_chunk = await self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        await self.postgres_store.db.simple_insert(
        await self.postgres_store.db_pool.simple_insert(
            table="port_from_sqlite3",
            values={
                "table_name": "sent_transactions",
@@ -794,14 +794,14 @@ class Porter(object):
            next_id = curr_id + 1
            txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))

        return self.postgres_store.db.runInteraction("setup_state_group_id_seq", r)
        return self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r)

    def _setup_user_id_seq(self):
        def r(txn):
            next_id = find_max_generated_user_id_localpart(txn) + 1
            txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))

        return self.postgres_store.db.runInteraction("setup_user_id_seq", r)
        return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)


##############################################
@@ -17,6 +17,7 @@
""" This is a reference implementation of a Matrix homeserver.
"""

import json
import os
import sys

@@ -25,6 +26,9 @@ if sys.version_info < (3, 5):
    print("Synapse requires Python 3.5 or above.")
    sys.exit(1)

# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
try:
    from twisted.internet import protocol
    from twisted.internet.protocol import Factory
@@ -36,7 +40,15 @@ try:
except ImportError:
    pass

__version__ = "1.18.0"
# Use the standard library json implementation instead of simplejson.
try:
    from canonicaljson import set_json_library

    set_json_library(json)
except ImportError:
    pass

__version__ = "1.19.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@@ -13,12 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from typing import List, Optional, Tuple

import pymacaroons
from netaddr import IPAddress

from twisted.internet import defer
from twisted.web.server import Request

import synapse.types
@@ -80,13 +79,14 @@ class Auth(object):
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key

    @defer.inlineCallbacks
    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids()
        auth_events_ids = yield self.compute_auth_events(
    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True
    ):
        prev_state_ids = await context.get_prev_state_ids()
        auth_events_ids = self.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = await self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events.values()}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
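The changes to this file are part of the branch-wide migration from Twisted's `@defer.inlineCallbacks` generators to native coroutines. As a minimal sketch of the recurring transformation (the class, method, and store call are illustrative, not from this file):

```python
from twisted.internet import defer


class ThingCache:
    """Illustrative class, not from this diff."""

    def __init__(self, store):
        self.store = store  # any object whose fetch_thing() returns a Deferred

    # Before: a generator-based coroutine; `yield` unwraps each Deferred
    # and callers receive a Deferred themselves.
    @defer.inlineCallbacks
    def get_thing_old(self, thing_id):
        thing = yield self.store.fetch_thing(thing_id)
        return thing

    # After: a native coroutine. Deferred-based callers bridge with
    # defer.ensureDeferred(cache.get_thing(thing_id)).
    async def get_thing(self, thing_id):
        return await self.store.fetch_thing(thing_id)
```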
@@ -94,14 +94,13 @@ class Auth(object):
            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
        )

    @defer.inlineCallbacks
    def check_user_in_room(
    async def check_user_in_room(
        self,
        room_id: str,
        user_id: str,
        current_state: Optional[StateMap[EventBase]] = None,
        allow_departed_users: bool = False,
    ):
    ) -> EventBase:
        """Check if the user is in the room, or was at some point.
        Args:
            room_id: The room to check.
@@ -119,37 +118,35 @@ class Auth(object):
        Raises:
            AuthError if the user is/was not in the room.
        Returns:
            Deferred[Optional[EventBase]]:
                Membership event for the user if the user was in the
                room. This will be the join event if they are currently joined to
                the room. This will be the leave event if they have left the room.
            Membership event for the user if the user was in the
            room. This will be the join event if they are currently joined to
            the room. This will be the leave event if they have left the room.
        """
        if current_state:
            member = current_state.get((EventTypes.Member, user_id), None)
        else:
            member = yield defer.ensureDeferred(
                self.state.get_current_state(
                    room_id=room_id, event_type=EventTypes.Member, state_key=user_id
                )
            member = await self.state.get_current_state(
                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
            )
        membership = member.membership if member else None

        if membership == Membership.JOIN:
            return member
        if member:
            membership = member.membership

        # XXX this looks totally bogus. Why do we not allow users who have been banned,
        # or those who were members previously and have been re-invited?
        if allow_departed_users and membership == Membership.LEAVE:
            forgot = yield self.store.did_forget(user_id, room_id)
            if not forgot:
            if membership == Membership.JOIN:
                return member

            # XXX this looks totally bogus. Why do we not allow users who have been banned,
            # or those who were members previously and have been re-invited?
            if allow_departed_users and membership == Membership.LEAVE:
                forgot = await self.store.did_forget(user_id, room_id)
                if not forgot:
                    return member

        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
    async def check_host_in_room(self, room_id, host):
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = yield self.store.is_host_joined(room_id, host)
            latest_event_ids = await self.store.is_host_joined(room_id, host)
            return latest_event_ids

    def can_federate(self, event, auth_events):
@@ -160,14 +157,13 @@ class Auth(object):
    def get_public_keys(self, invite_event):
        return event_auth.get_public_keys(invite_event)

    @defer.inlineCallbacks
    def get_user_by_req(
    async def get_user_by_req(
        self,
        request: Request,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
    ):
    ) -> synapse.types.Requester:
        """ Get a registered user's ID.

        Args:
@@ -180,7 +176,7 @@ class Auth(object):
            /login will deliver access tokens regardless of expiration.

        Returns:
            defer.Deferred: resolves to a `synapse.types.Requester` object
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
            is invalid.
@@ -194,14 +190,14 @@ class Auth(object):

            access_token = self.get_access_token_from_request(request)

            user_id, app_service = yield self._get_appservice_user_id(request)
            user_id, app_service = await self._get_appservice_user_id(request)
            if user_id:
                request.authenticated_entity = user_id
                opentracing.set_tag("authenticated_entity", user_id)
                opentracing.set_tag("appservice_id", app_service.id)

                if ip_addr and self._track_appservice_user_ips:
                    yield self.store.insert_client_ip(
                    await self.store.insert_client_ip(
                        user_id=user_id,
                        access_token=access_token,
                        ip=ip_addr,
@@ -211,17 +207,18 @@ class Auth(object):

                return synapse.types.create_requester(user_id, app_service=app_service)

            user_info = yield self.get_user_by_access_token(
            user_info = await self.get_user_by_access_token(
                access_token, rights, allow_expired=allow_expired
            )
            user = user_info["user"]
            token_id = user_info["token_id"]
            is_guest = user_info["is_guest"]
            shadow_banned = user_info["shadow_banned"]

            # Deny the request if the user account has expired.
            if self._account_validity.enabled and not allow_expired:
                user_id = user.to_string()
                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
                expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
                if (
                    expiration_ts is not None
                    and self.clock.time_msec() >= expiration_ts
@@ -235,7 +232,7 @@ class Auth(object):
            device_id = user_info.get("device_id")

            if user and access_token and ip_addr:
                yield self.store.insert_client_ip(
                await self.store.insert_client_ip(
                    user_id=user.to_string(),
                    access_token=access_token,
                    ip=ip_addr,
@@ -256,13 +253,17 @@ class Auth(object):
            opentracing.set_tag("device_id", device_id)

            return synapse.types.create_requester(
                user, token_id, is_guest, device_id, app_service=app_service
                user,
                token_id,
                is_guest,
                shadow_banned,
                device_id,
                app_service=app_service,
            )
        except KeyError:
            raise MissingClientTokenError()
    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
    async def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(request)
        )
@@ -283,14 +284,13 @@ class Auth(object):

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(403, "Application service cannot masquerade as this user.")
        if not (yield self.store.get_user_by_id(user_id)):
        if not (await self.store.get_user_by_id(user_id)):
            raise AuthError(403, "Application service has not registered this user")
        return user_id, app_service

    @defer.inlineCallbacks
    def get_user_by_access_token(
    async def get_user_by_access_token(
        self, token: str, rights: str = "access", allow_expired: bool = False,
    ):
    ) -> dict:
        """ Validate access token and get user_id from it

        Args:
@@ -300,9 +300,10 @@ class Auth(object):
            allow_expired: If False, raises an InvalidClientTokenError
                if the token is expired
        Returns:
            Deferred[dict]: dict that includes:
            dict that includes:
                `user` (UserID)
                `is_guest` (bool)
                `shadow_banned` (bool)
                `token_id` (int|None): access token id. May be None if guest
                `device_id` (str|None): device corresponding to access token
        Raises:
@@ -314,7 +315,7 @@ class Auth(object):

        if rights == "access":
            # first look in the database
            r = yield self._look_up_user_by_access_token(token)
            r = await self._look_up_user_by_access_token(token)
            if r:
                valid_until_ms = r["valid_until_ms"]
                if (
@@ -352,7 +353,7 @@ class Auth(object):
            # It would of course be much easier to store guest access
            # tokens in the database as well, but that would break existing
            # guest tokens.
            stored_user = yield self.store.get_user_by_id(user_id)
            stored_user = await self.store.get_user_by_id(user_id)
            if not stored_user:
                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
            if not stored_user["is_guest"]:
@@ -362,6 +363,7 @@ class Auth(object):
            ret = {
                "user": user,
                "is_guest": True,
                "shadow_banned": False,
                "token_id": None,
                # all guests get the same device id
                "device_id": GUEST_DEVICE_ID,
@@ -371,6 +373,7 @@ class Auth(object):
            ret = {
                "user": user,
                "is_guest": False,
                "shadow_banned": False,
                "token_id": None,
                "device_id": None,
            }
@@ -482,9 +485,8 @@ class Auth(object):
        now = self.hs.get_clock().time_msec()
        return now < expiry

    @defer.inlineCallbacks
    def _look_up_user_by_access_token(self, token):
        ret = yield self.store.get_user_by_access_token(token)
    async def _look_up_user_by_access_token(self, token):
        ret = await self.store.get_user_by_access_token(token)
        if not ret:
            return None

@@ -495,6 +497,7 @@ class Auth(object):
            "user": UserID.from_string(ret.get("name")),
            "token_id": ret.get("token_id", None),
            "is_guest": False,
            "shadow_banned": ret.get("shadow_banned"),
            "device_id": ret.get("device_id"),
            "valid_until_ms": ret.get("valid_until_ms"),
        }
@@ -507,7 +510,7 @@ class Auth(object):
            logger.warning("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
        request.authenticated_entity = service.sender
        return defer.succeed(service)
        return service
    async def is_server_admin(self, user: UserID) -> bool:
        """ Check if the given user is a local server admin.
@@ -522,7 +525,7 @@ class Auth(object):

    def compute_auth_events(
        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
    ):
    ) -> List[str]:
        """Given an event and current state return the list of event IDs used
        to auth an event.

@@ -530,11 +533,11 @@ class Auth(object):
            should be added to the event's `auth_events`.

        Returns:
            defer.Deferred(list[str]): List of event IDs.
            List of event IDs.
        """

        if event.type == EventTypes.Create:
            return defer.succeed([])
            return []

        # Currently we ignore the `for_verification` flag even though there are
        # some situations where we can drop particular auth events when adding
@@ -553,7 +556,7 @@ class Auth(object):
        if auth_ev_id:
            auth_ids.append(auth_ev_id)

        return defer.succeed(auth_ids)
        return auth_ids

    async def check_can_change_room_list(self, room_id: str, user: UserID):
        """Determine whether the user is allowed to edit the room's entry in the
@@ -636,10 +639,9 @@ class Auth(object):

        return query_params[0].decode("ascii")

    @defer.inlineCallbacks
    def check_user_in_room_or_world_readable(
    async def check_user_in_room_or_world_readable(
        self, room_id: str, user_id: str, allow_departed_users: bool = False
    ):
    ) -> Tuple[str, Optional[str]]:
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

@@ -650,10 +652,9 @@ class Auth(object):
            members but have now departed

        Returns:
            Deferred[tuple[str, str|None]]: Resolves to the current membership of
                the user in the room and the membership event ID of the user. If
                the user is not in the room and never has been, then
                `(Membership.JOIN, None)` is returned.
            Resolves to the current membership of the user in the room and the
            membership event ID of the user. If the user is not in the room and
            never has been, then `(Membership.JOIN, None)` is returned.
        """

        try:
@@ -662,15 +663,13 @@ class Auth(object):
            # * The user is a non-guest user, and was ever in the room
            # * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_in_room(
            member_event = await self.check_user_in_room(
                room_id, user_id, allow_departed_users=allow_departed_users
            )
            return member_event.membership, member_event.event_id
        except AuthError:
            visibility = yield defer.ensureDeferred(
                self.state.get_current_state(
                    room_id, EventTypes.RoomHistoryVisibility, ""
                )
            visibility = await self.state.get_current_state(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (
                visibility
@@ -15,8 +15,6 @@

import logging

from twisted.internet import defer

from synapse.api.constants import LimitBlockingTypes, UserTypes
from synapse.api.errors import Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
@@ -36,8 +34,7 @@ class AuthBlocking(object):
        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
        self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
    async def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

@@ -60,7 +57,7 @@ class AuthBlocking(object):
        if user_id is not None:
            if user_id == self._server_notices_mxid:
                return
            if (yield self.store.is_support_user(user_id)):
            if await self.store.is_support_user(user_id):
                return

        if self._hs_disabled:
@@ -76,11 +73,11 @@ class AuthBlocking(object):

        # If the user is already part of the MAU cohort or a trial user
        if user_id:
            timestamp = yield self.store.user_last_seen_monthly_active(user_id)
            timestamp = await self.store.user_last_seen_monthly_active(user_id)
            if timestamp:
                return

            is_trial = yield self.store.is_trial_user(user_id)
            is_trial = await self.store.is_trial_user(user_id)
            if is_trial:
                return
        elif threepid:
@@ -93,7 +90,7 @@ class AuthBlocking(object):
            # allow registration. Support users are excluded from MAU checks.
            return
        # Else if there is no room in the MAU bucket, bail
        current_mau = yield self.store.get_monthly_active_count()
        current_mau = await self.store.get_monthly_active_count()
        if current_mau >= self._max_mau_value:
            raise ResourceLimitError(
                403,
@@ -238,14 +238,16 @@ class InteractiveAuthIncompleteError(Exception):
    (This indicates we should return a 401 with 'result' as the body)

    Attributes:
        session_id: The ID of the ongoing interactive auth session.
        result: the server response to the request, which should be
            passed back to the client
    """

    def __init__(self, result: "JsonDict"):
    def __init__(self, session_id: str, result: "JsonDict"):
        super(InteractiveAuthIncompleteError, self).__init__(
            "Interactive auth not yet complete"
        )
        self.session_id = session_id
        self.result = result
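Call sites now supply the UI-auth session ID as the first argument; a hedged sketch (the helper and the result dict are illustrative, not a real server response):

```python
def require_more_auth(session_id: str):
    # Illustrative: the flows shown here are made up for the sketch.
    raise InteractiveAuthIncompleteError(
        session_id,
        {
            "session": session_id,
            "flows": [{"stages": ["m.login.password"]}],
            "completed": [],
        },
    )
```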
@@ -21,11 +21,9 @@ import jsonschema
from canonicaljson import json
from jsonschema import FormatChecker

from twisted.internet import defer

from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.api.presence import UserPresenceState
from synapse.types import RoomID, UserID

FILTER_SCHEMA = {
@@ -137,9 +135,8 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = yield self.store.get_user_filter(user_localpart, filter_id)
    async def get_user_filter(self, user_localpart, filter_id):
        result = await self.store.get_user_filter(user_localpart, filter_id)
        return FilterCollection(result)

    def add_user_filter(self, user_localpart, user_filter):
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import os
@@ -22,7 +21,6 @@ import sys
import traceback
from typing import Iterable

from daemonize import Daemonize
from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
@@ -34,6 +32,7 @@ from synapse.config.server import ListenerConfig
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

@@ -129,17 +128,8 @@ def start_reactor(
        if print_pidfile:
            print(pid_file)

        daemon = Daemonize(
            app=appname,
            pid=pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
        daemonize_process(pid_file, logger)
    run()


def quit_with_error(error_string: str) -> NoReturn:
@@ -278,7 +268,7 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):

    # It is now safe to start your Synapse.
    hs.start_listening(listeners)
    hs.get_datastore().db.start_profiling()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

    setup_sentry(hs)
@@ -123,17 +123,18 @@ from synapse.rest.client.v2_alpha.account_data import (
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.censor_events import CensorEventsStore
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
from synapse.storage.data_stores.main.monthly_active_users import (
from synapse.server import HomeServer, cache_in_self
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.storage.data_stores.main.search import SearchWorkerStore
from synapse.storage.data_stores.main.ui_auth import UIAuthWorkerStore
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.databases.main.presence import UserPresenceState
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
@@ -493,7 +494,10 @@ class GenericWorkerServer(HomeServer):
        site_tag = listener_config.http_options.tag
        if site_tag is None:
            site_tag = port
        resources = {}

        # We always include a health resource.
        resources = {"/health": HealthResource()}

        for res in listener_config.http_options.resources:
            for name in res.names:
                if name == "metrics":
@@ -628,13 +632,15 @@ class GenericWorkerServer(HomeServer):

        self.get_tcp_replication().start_replication(self)

    def remove_pusher(self, app_id, push_key, user_id):
    async def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

    def build_replication_data_handler(self):
    @cache_in_self
    def get_replication_data_handler(self):
        return GenericWorkerReplicationHandler(self)

    def build_presence_handler(self):
    @cache_in_self
    def get_presence_handler(self):
        return GenericWorkerPresence(self)
@@ -68,6 +68,7 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.well_known import WellKnownResource
from synapse.server import HomeServer
@@ -98,7 +99,9 @@ class SynapseHomeServer(HomeServer):
        if site_tag is None:
            site_tag = port

        resources = {}
        # We always include a health resource.
        resources = {"/health": HealthResource()}

        for res in listener_config.http_options.resources:
            for name in res.names:
                if name == "openid" and "federation" in res.names:
@@ -380,13 +383,12 @@ def setup(config_options):

    hs.setup_master()

    @defer.inlineCallbacks
    def do_acme():
    async def do_acme() -> bool:
        """
        Reprovision an ACME certificate, if it's required.

        Returns:
            Deferred[bool]: Whether the cert has been updated.
            Whether the cert has been updated.
        """
        acme = hs.get_acme_handler()

@@ -405,7 +407,7 @@ def setup(config_options):
            provision = True

        if provision:
            yield acme.provision_certificate()
            await acme.provision_certificate()

        return provision

@@ -415,7 +417,7 @@ def setup(config_options):
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = yield do_acme()
        reprovisioned = yield defer.ensureDeferred(do_acme())
        if reprovisioned:
            _base.refresh_certificate(hs)

@@ -427,8 +429,8 @@ def setup(config_options):
        acme = hs.get_acme_handler()
        # Start up the webservices which we will respond to ACME
        # challenges with, and then provision.
        yield acme.start_listening()
        yield do_acme()
        yield defer.ensureDeferred(acme.start_listening())
        yield defer.ensureDeferred(do_acme())

        # Check if it needs to be reprovisioned every day.
        hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)
@@ -442,7 +444,7 @@ def setup(config_options):

        _base.start(hs, config.listeners)

        hs.get_datastore().db.updates.start_doing_background_updates()
        hs.get_datastore().db_pool.updates.start_doing_background_updates()
    except Exception:
        # Print the exception and bail out.
        print("Error during startup:", file=sys.stderr)
@@ -552,8 +554,8 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
    #

    # This only reports info about the *main* database.
    stats["database_engine"] = hs.get_datastore().db.engine.module.__name__
    stats["database_server_version"] = hs.get_datastore().db.engine.server_version
    stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__
    stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version

    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
    try:
@@ -15,11 +15,9 @@
import logging
import re

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.types import GroupID, get_domain_from_id
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.caches.descriptors import cached

logger = logging.getLogger(__name__)

@@ -43,7 +41,7 @@ class AppServiceTransaction(object):
        Args:
            as_api(ApplicationServiceApi): The API to use to send.
        Returns:
            A Deferred which resolves to True if the transaction was sent.
            An Awaitable which resolves to True if the transaction was sent.
        """
        return as_api.push_bulk(
            service=self.service, events=self.events, txn_id=self.id
@@ -172,8 +170,7 @@ class ApplicationService(object):
                return regex_obj["exclusive"]
        return False

    @defer.inlineCallbacks
    def _matches_user(self, event, store):
    async def _matches_user(self, event, store):
        if not event:
            return False

@@ -188,12 +185,12 @@ class ApplicationService(object):
        if not store:
            return False

        does_match = yield self._matches_user_in_member_list(event.room_id, store)
        does_match = await self._matches_user_in_member_list(event.room_id, store)
        return does_match

    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def _matches_user_in_member_list(self, room_id, store, cache_context):
        member_list = yield store.get_users_in_room(
    @cached(num_args=1, cache_context=True)
    async def _matches_user_in_member_list(self, room_id, store, cache_context):
        member_list = await store.get_users_in_room(
            room_id, on_invalidate=cache_context.invalidate
        )

@@ -208,35 +205,33 @@ class ApplicationService(object):
            return self.is_interested_in_room(event.room_id)
        return False

    @defer.inlineCallbacks
    def _matches_aliases(self, event, store):
    async def _matches_aliases(self, event, store):
        if not store or not event:
            return False

        alias_list = yield store.get_aliases_for_room(event.room_id)
        alias_list = await store.get_aliases_for_room(event.room_id)
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
                return True
        return False

    @defer.inlineCallbacks
    def is_interested(self, event, store=None):
    async def is_interested(self, event, store=None) -> bool:
        """Check if this service is interested in this event.

        Args:
            event(Event): The event to check.
            store(DataStore)
        Returns:
            bool: True if this service would like to know about this event.
            True if this service would like to know about this event.
        """
        # Do cheap checks first
        if self._matches_room_id(event):
            return True

        if (yield self._matches_aliases(event, store)):
        if await self._matches_aliases(event, store):
            return True

        if (yield self._matches_user(event, store)):
        if await self._matches_user(event, store):
            return True

        return False
@@ -93,13 +93,12 @@ class ApplicationServiceApi(SimpleHttpClient):
            hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
    async def query_user(self, service, user_id):
        if service.url is None:
            return False
        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
        try:
            response = yield self.get_json(uri, {"access_token": service.hs_token})
            response = await self.get_json(uri, {"access_token": service.hs_token})
            if response is not None:  # just an empty json object
                return True
        except CodeMessageException as e:
@@ -110,14 +109,12 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_user to %s threw exception %s", uri, ex)
        return False

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
    async def query_alias(self, service, alias):
        if service.url is None:
            return False
        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {"access_token": service.hs_token})
            response = await self.get_json(uri, {"access_token": service.hs_token})
            if response is not None:  # just an empty json object
                return True
        except CodeMessageException as e:
@@ -128,8 +125,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        return False

    @defer.inlineCallbacks
    def query_3pe(self, service, kind, protocol, fields):
    async def query_3pe(self, service, kind, protocol, fields):
        if kind == ThirdPartyEntityKind.USER:
            required_field = "userid"
        elif kind == ThirdPartyEntityKind.LOCATION:
@@ -146,7 +142,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            urllib.parse.quote(protocol),
        )
        try:
            response = yield self.get_json(uri, fields)
            response = await self.get_json(uri, fields)
            if not isinstance(response, list):
                logger.warning(
                    "query_3pe to %s returned an invalid response %r", uri, response
@@ -179,7 +175,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                urllib.parse.quote(protocol),
            )
            try:
                info = yield self.get_json(uri, {})
                info = yield defer.ensureDeferred(self.get_json(uri, {}))

                if not _is_valid_3pe_metadata(info):
                    logger.warning(
@@ -202,8 +198,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        key = (service.id, protocol)
        return self.protocol_meta_cache.wrap(key, _get)

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
    async def push_bulk(self, service, events, txn_id=None):
        if service.url is None:
            return True

@@ -218,7 +213,7 @@ class ApplicationServiceApi(SimpleHttpClient):

        uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id))
        try:
            yield self.put_json(
            await self.put_json(
                uri=uri,
                json_body={"events": events},
                args={"access_token": service.hs_token},
@@ -50,8 +50,6 @@ components.
"""
import logging

from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -73,12 +71,11 @@ class ApplicationServiceScheduler(object):
        self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
        self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)

    @defer.inlineCallbacks
    def start(self):
    async def start(self):
        logger.info("Starting appservice scheduler")

        # check for any DOWN ASes and start recoverers for them.
        services = yield self.store.get_appservices_by_state(
        services = await self.store.get_appservices_by_state(
            ApplicationServiceState.DOWN
        )

@@ -117,8 +114,7 @@ class _ServiceQueuer(object):
            "as-sender-%s" % (service.id,), self._send_request, service
        )

    @defer.inlineCallbacks
    def _send_request(self, service):
    async def _send_request(self, service):
        # sanity-check: we shouldn't get here if this service already has a sender
        # running.
        assert service.id not in self.requests_in_flight
@@ -130,7 +126,7 @@ class _ServiceQueuer(object):
            if not events:
                return
            try:
                yield self.txn_ctrl.send(service, events)
                await self.txn_ctrl.send(service, events)
            except Exception:
                logger.exception("AS request failed")
            finally:
@@ -162,36 +158,33 @@ class _TransactionController(object):
        # for UTs
        self.RECOVERER_CLASS = _Recoverer

    @defer.inlineCallbacks
    def send(self, service, events):
    async def send(self, service, events):
        try:
            txn = yield self.store.create_appservice_txn(service=service, events=events)
            service_is_up = yield self._is_service_up(service)
            txn = await self.store.create_appservice_txn(service=service, events=events)
            service_is_up = await self._is_service_up(service)
            if service_is_up:
                sent = yield txn.send(self.as_api)
                sent = await txn.send(self.as_api)
                if sent:
                    yield txn.complete(self.store)
                    await txn.complete(self.store)
                else:
                    run_in_background(self._on_txn_fail, service)
        except Exception:
            logger.exception("Error creating appservice transaction")
            run_in_background(self._on_txn_fail, service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):
    async def on_recovered(self, recoverer):
        logger.info(
            "Successfully recovered application service AS ID %s", recoverer.service.id
        )
        self.recoverers.pop(recoverer.service.id)
        logger.info("Remaining active recoverers: %s", len(self.recoverers))
        yield self.store.set_appservice_state(
        await self.store.set_appservice_state(
            recoverer.service, ApplicationServiceState.UP
        )

    @defer.inlineCallbacks
    def _on_txn_fail(self, service):
    async def _on_txn_fail(self, service):
        try:
            yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
            await self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
            self.start_recoverer(service)
        except Exception:
            logger.exception("Error starting AS recoverer")
@@ -211,9 +204,8 @@ class _TransactionController(object):
        recoverer.recover()
        logger.info("Now %i active recoverers", len(self.recoverers))

    @defer.inlineCallbacks
    def _is_service_up(self, service):
        state = yield self.store.get_appservice_state(service)
    async def _is_service_up(self, service):
        state = await self.store.get_appservice_state(service)
        return state == ApplicationServiceState.UP or state is None


@@ -254,25 +246,24 @@ class _Recoverer(object):
        self.backoff_counter += 1
        self.recover()

    @defer.inlineCallbacks
    def retry(self):
    async def retry(self):
        logger.info("Starting retries on %s", self.service.id)
        try:
            while True:
                txn = yield self.store.get_oldest_unsent_txn(self.service)
                txn = await self.store.get_oldest_unsent_txn(self.service)
                if not txn:
                    # nothing left: we're done!
                    self.callback(self)
                    await self.callback(self)
                    return

                logger.info(
                    "Retrying transaction %s for AS ID %s", txn.id, txn.service.id
                )
                sent = yield txn.send(self.as_api)
                sent = await txn.send(self.as_api)
                if not sent:
                    break

                yield txn.complete(self.store)
                await txn.complete(self.store)

                # reset the backoff counter and then process the next transaction
                self.backoff_counter = 1
@@ -18,12 +18,16 @@
import argparse
import errno
import os
import time
import urllib.parse
from collections import OrderedDict
from hashlib import sha256
from textwrap import dedent
from typing import Any, List, MutableMapping, Optional
from typing import Any, Callable, List, MutableMapping, Optional

import attr
import jinja2
import pkg_resources
import yaml

@@ -100,6 +104,11 @@ class Config(object):
    def __init__(self, root_config=None):
        self.root = root_config

        # Get the path to the default Synapse template directory
        self.default_template_dir = pkg_resources.resource_filename(
            "synapse", "res/templates"
        )

    def __getattr__(self, item: str) -> Any:
        """
        Try and fetch a configuration option that does not exist on this class.
@@ -184,6 +193,95 @@ class Config(object):
        with open(file_path) as file_stream:
            return file_stream.read()

    def read_templates(
        self, filenames: List[str], custom_template_directory: Optional[str] = None,
    ) -> List[jinja2.Template]:
        """Load a list of template files from disk using the given variables.

        This function will attempt to load the given templates from the default Synapse
        template directory. If `custom_template_directory` is supplied, that directory
        is tried first.

        Files read are treated as Jinja templates. These templates are not rendered yet.

        Args:
            filenames: A list of template filenames to read.

            custom_template_directory: A directory to try to look for the templates
                before using the default Synapse template directory instead.

        Raises:
            ConfigError: if the file's path is incorrect or otherwise cannot be read.

        Returns:
            A list of jinja2 templates.
        """
        templates = []
        search_directories = [self.default_template_dir]

        # The loader will first look in the custom template directory (if specified)
        # for the given filename. If it doesn't find it, it will use the default
        # template dir instead.
        if custom_template_directory:
            # Check that the given template directory exists
            if not self.path_exists(custom_template_directory):
                raise ConfigError(
                    "Configured template directory does not exist: %s"
                    % (custom_template_directory,)
                )

            # Search the custom template directory as well
            search_directories.insert(0, custom_template_directory)

        loader = jinja2.FileSystemLoader(search_directories)
        env = jinja2.Environment(loader=loader, autoescape=True)

        # Update the environment with our custom filters
        env.filters.update(
            {
                "format_ts": _format_ts_filter,
                "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
            }
        )

        for filename in filenames:
            # Load the template
            template = env.get_template(filename)
            templates.append(template)

        return templates
|
||||
|
||||
|
||||
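A rough sketch of how a config class is expected to call the new helper; the filenames and the custom_dir variable are invented for illustration:

    # Load two templates, preferring a user-supplied directory if given.
    example_html, example_txt = self.read_templates(
        ["example.html", "example.txt"], custom_template_directory=custom_dir,
    )

    # The results are unrendered jinja2.Template objects; placeholders are
    # filled in later, at the point of use:
    body = example_html.render({"display_name": "Alice"})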
+
+def _format_ts_filter(value: int, format: str):
+    return time.strftime(format, time.localtime(value / 1000))
+
+
+def _create_mxc_to_http_filter(public_baseurl: str) -> Callable:
+    """Create and return a jinja2 filter that converts MXC urls to HTTP
+
+    Args:
+        public_baseurl: The public, accessible base URL of the homeserver
+    """
+
+    def mxc_to_http_filter(value, width, height, resize_method="crop"):
+        if value[0:6] != "mxc://":
+            return ""
+
+        server_and_media_id = value[6:]
+        fragment = None
+        if "#" in server_and_media_id:
+            server_and_media_id, fragment = server_and_media_id.split("#", 1)
+            fragment = "#" + fragment
+
+        params = {"width": width, "height": height, "method": resize_method}
+        return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
+            public_baseurl,
+            server_and_media_id,
+            urllib.parse.urlencode(params),
+            fragment or "",
+        )
+
+    return mxc_to_http_filter
+
+
 class RootConfig(object):
     """
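For illustration, roughly what the new mxc_to_http filter produces (the homeserver URL and media ID are made up; note that the filter concatenates "_matrix/..." directly, so it assumes public_baseurl ends with a trailing slash):

    to_http = _create_mxc_to_http_filter("https://matrix.example.com/")
    to_http("mxc://example.com/abcdef123", 32, 32)
    # -> "https://matrix.example.com/_matrix/media/v1/thumbnail/example.com/abcdef123
    #     ?width=32&height=32&method=crop"
    to_http("not-an-mxc-url", 32, 32)  # -> ""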
synapse/config/_util.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List
+
+import jsonschema
+
+from synapse.config._base import ConfigError
+from synapse.types import JsonDict
+
+
+def validate_config(json_schema: JsonDict, config: Any, config_path: List[str]) -> None:
+    """Validates a config setting against a JsonSchema definition
+
+    This can be used to validate a section of the config file against a schema
+    definition. If the validation fails, a ConfigError is raised with a textual
+    description of the problem.
+
+    Args:
+        json_schema: the schema to validate against
+        config: the configuration value to be validated
+        config_path: the path within the config file. This will be used as a basis
+           for the error message.
+    """
+    try:
+        jsonschema.validate(config, json_schema)
+    except jsonschema.ValidationError as e:
+        # copy `config_path` before modifying it.
+        path = list(config_path)
+        for p in list(e.path):
+            if isinstance(p, int):
+                path.append("<item %i>" % p)
+            else:
+                path.append(str(p))
+
+        raise ConfigError(
+            "Unable to parse configuration: %s at %s" % (e.message, ".".join(path))
+        )
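A quick sketch of the new helper in use; the schema and values are invented for the example:

    from synapse.config._util import validate_config

    SCHEMA = {"type": "array", "items": {"type": "string"}}

    validate_config(SCHEMA, ["a", "b"], config_path=["my_section", "my_list"])  # ok
    validate_config(SCHEMA, ["a", 1], config_path=["my_section", "my_list"])
    # raises ConfigError("Unable to parse configuration: 1 is not of type
    # 'string' at my_section.my_list.<item 1>")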
@@ -100,7 +100,10 @@ class DatabaseConnectionConfig:

         self.name = name
         self.config = db_config
-        self.data_stores = data_stores
+
+        # The `data_stores` config is actually talking about `databases` (we
+        # changed the name).
+        self.databases = data_stores


 class DatabaseConfig(Config):
@@ -23,7 +23,6 @@ from enum import Enum
 from typing import Optional

 import attr
-import pkg_resources

 from ._base import Config, ConfigError

@@ -98,21 +97,18 @@ class EmailConfig(Config):
         if parsed[1] == "":
             raise RuntimeError("Invalid notif_from address")

+        # A user-configurable template directory
         template_dir = email_config.get("template_dir")
-        # we need an absolute path, because we change directory after starting (and
-        # we don't yet know what auxiliary templates like mail.css we will need).
-        # (Note that loading as package_resources with jinja.PackageLoader doesn't
-        # work for the same reason.)
-        if not template_dir:
-            template_dir = pkg_resources.resource_filename("synapse", "res/templates")
-
-        self.email_template_dir = os.path.abspath(template_dir)
+        if isinstance(template_dir, str):
+            # We need an absolute path, because we change directory after starting (and
+            # we don't yet know what auxiliary templates like mail.css we will need).
+            template_dir = os.path.abspath(template_dir)
+        elif template_dir is not None:
+            # If template_dir is something other than a str or None, warn the user
+            raise ConfigError("Config option email.template_dir must be type str")

         self.email_enable_notifs = email_config.get("enable_notifs", False)

         account_validity_config = config.get("account_validity") or {}
         account_validity_renewal_enabled = account_validity_config.get("renew_at")

         self.threepid_behaviour_email = (
             # Have Synapse handle the email sending if account_threepid_delegates.email
             # is not defined
@@ -166,19 +162,6 @@ class EmailConfig(Config):
             email_config.get("validation_token_lifetime", "1h")
         )

-        if (
-            self.email_enable_notifs
-            or account_validity_renewal_enabled
-            or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
-        ):
-            # make sure we can import the required deps
-            import bleach
-            import jinja2
-
-            # prevent unused warnings
-            jinja2
-            bleach
-
         if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
             missing = []
             if not self.email_notif_from:
@@ -196,49 +179,49 @@ class EmailConfig(Config):

         # These email templates have placeholders in them, and thus must be
         # parsed using a templating engine during a request
-        self.email_password_reset_template_html = email_config.get(
+        password_reset_template_html = email_config.get(
             "password_reset_template_html", "password_reset.html"
         )
-        self.email_password_reset_template_text = email_config.get(
+        password_reset_template_text = email_config.get(
             "password_reset_template_text", "password_reset.txt"
         )
-        self.email_registration_template_html = email_config.get(
+        registration_template_html = email_config.get(
             "registration_template_html", "registration.html"
         )
-        self.email_registration_template_text = email_config.get(
+        registration_template_text = email_config.get(
             "registration_template_text", "registration.txt"
         )
-        self.email_add_threepid_template_html = email_config.get(
+        add_threepid_template_html = email_config.get(
             "add_threepid_template_html", "add_threepid.html"
         )
-        self.email_add_threepid_template_text = email_config.get(
+        add_threepid_template_text = email_config.get(
             "add_threepid_template_text", "add_threepid.txt"
         )

-        self.email_password_reset_template_failure_html = email_config.get(
+        password_reset_template_failure_html = email_config.get(
             "password_reset_template_failure_html", "password_reset_failure.html"
         )
-        self.email_registration_template_failure_html = email_config.get(
+        registration_template_failure_html = email_config.get(
             "registration_template_failure_html", "registration_failure.html"
         )
-        self.email_add_threepid_template_failure_html = email_config.get(
+        add_threepid_template_failure_html = email_config.get(
             "add_threepid_template_failure_html", "add_threepid_failure.html"
         )

         # These templates do not support any placeholder variables, so we
         # will read them from disk once during setup
-        email_password_reset_template_success_html = email_config.get(
+        password_reset_template_success_html = email_config.get(
             "password_reset_template_success_html", "password_reset_success.html"
         )
-        email_registration_template_success_html = email_config.get(
+        registration_template_success_html = email_config.get(
             "registration_template_success_html", "registration_success.html"
         )
-        email_add_threepid_template_success_html = email_config.get(
+        add_threepid_template_success_html = email_config.get(
             "add_threepid_template_success_html", "add_threepid_success.html"
         )

-        # Check templates exist
-        for f in [
+        # Read all templates from disk
+        (
             self.email_password_reset_template_html,
             self.email_password_reset_template_text,
             self.email_registration_template_html,
@@ -248,32 +231,36 @@ class EmailConfig(Config):
             self.email_password_reset_template_failure_html,
             self.email_registration_template_failure_html,
             self.email_add_threepid_template_failure_html,
-            email_password_reset_template_success_html,
-            email_registration_template_success_html,
-            email_add_threepid_template_success_html,
-        ]:
-            p = os.path.join(self.email_template_dir, f)
-            if not os.path.isfile(p):
-                raise ConfigError("Unable to find template file %s" % (p,))
+            password_reset_template_success_html_template,
+            registration_template_success_html_template,
+            add_threepid_template_success_html_template,
+        ) = self.read_templates(
+            [
+                password_reset_template_html,
+                password_reset_template_text,
+                registration_template_html,
+                registration_template_text,
+                add_threepid_template_html,
+                add_threepid_template_text,
+                password_reset_template_failure_html,
+                registration_template_failure_html,
+                add_threepid_template_failure_html,
+                password_reset_template_success_html,
+                registration_template_success_html,
+                add_threepid_template_success_html,
+            ],
+            template_dir,
+        )

-        # Retrieve content of web templates
-        filepath = os.path.join(
-            self.email_template_dir, email_password_reset_template_success_html
-        )
-        self.email_password_reset_template_success_html = self.read_file(
-            filepath, "email.password_reset_template_success_html"
-        )
-        filepath = os.path.join(
-            self.email_template_dir, email_registration_template_success_html
-        )
-        self.email_registration_template_success_html_content = self.read_file(
-            filepath, "email.registration_template_success_html"
-        )
-        filepath = os.path.join(
-            self.email_template_dir, email_add_threepid_template_success_html
-        )
-        self.email_add_threepid_template_success_html_content = self.read_file(
-            filepath, "email.add_threepid_template_success_html"
-        )
+        # Render templates that do not contain any placeholders
+        self.email_password_reset_template_success_html_content = (
+            password_reset_template_success_html_template.render()
+        )
+        self.email_registration_template_success_html_content = (
+            registration_template_success_html_template.render()
+        )
+        self.email_add_threepid_template_success_html_content = (
+            add_threepid_template_success_html_template.render()
+        )

         if self.email_enable_notifs:
@@ -290,17 +277,19 @@ class EmailConfig(Config):
                 % (", ".join(missing),)
             )

-            self.email_notif_template_html = email_config.get(
+            notif_template_html = email_config.get(
                 "notif_template_html", "notif_mail.html"
             )
-            self.email_notif_template_text = email_config.get(
+            notif_template_text = email_config.get(
                 "notif_template_text", "notif_mail.txt"
             )

-            for f in self.email_notif_template_text, self.email_notif_template_html:
-                p = os.path.join(self.email_template_dir, f)
-                if not os.path.isfile(p):
-                    raise ConfigError("Unable to find email template file %s" % (p,))
+            (
+                self.email_notif_template_html,
+                self.email_notif_template_text,
+            ) = self.read_templates(
+                [notif_template_html, notif_template_text], template_dir,
+            )

             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
@@ -309,18 +298,20 @@ class EmailConfig(Config):
             "client_base_url", email_config.get("riot_base_url", None)
         )

-        if account_validity_renewal_enabled:
-            self.email_expiry_template_html = email_config.get(
+        if self.account_validity.renew_by_email_enabled:
+            expiry_template_html = email_config.get(
                 "expiry_template_html", "notice_expiry.html"
             )
-            self.email_expiry_template_text = email_config.get(
+            expiry_template_text = email_config.get(
                 "expiry_template_text", "notice_expiry.txt"
             )

-            for f in self.email_expiry_template_text, self.email_expiry_template_html:
-                p = os.path.join(self.email_template_dir, f)
-                if not os.path.isfile(p):
-                    raise ConfigError("Unable to find email template file %s" % (p,))
+            (
+                self.account_validity_template_html,
+                self.account_validity_template_text,
+            ) = self.read_templates(
+                [expiry_template_html, expiry_template_text], template_dir,
+            )

         subjects_config = email_config.get("subjects", {})
         subjects = {}
@@ -400,9 +391,7 @@ class EmailConfig(Config):
         # Directory in which Synapse will try to find the template files below.
         # If not set, default templates from within the Synapse package will be used.
         #
-        # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
-        # If you *do* uncomment it, you will need to make sure that all the templates
-        # below are in the directory.
+        # Do not uncomment this setting unless you want to customise the templates.
         #
         # Synapse will look for the following templates in this directory:
         #
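The hunks above split the email templates into two groups: templates with placeholders stay as unrendered jinja2 Templates and are rendered per request, while placeholder-free templates are rendered once at startup and kept as plain strings. A sketch of the two patterns, with invented template names:

    # Placeholder-free: render once at config time, store the final HTML.
    (success_template,) = self.read_templates(["example_success.html"], template_dir)
    self.example_success_html_content = success_template.render()

    # With placeholders: keep the Template object and render per request.
    (body_template,) = self.read_templates(["example_body.html"], template_dir)
    # ...later, when handling a request:
    html = body_template.render({"link": validation_link})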
@@ -55,24 +55,33 @@ formatters:
         format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
 %(request)s - %(message)s'

 filters:
     context:
         (): synapse.logging.context.LoggingContextFilter
         request: ""

 handlers:
     file:
-        class: logging.handlers.RotatingFileHandler
+        class: logging.handlers.TimedRotatingFileHandler
         formatter: precise
         filename: ${log_file}
-        maxBytes: 104857600
-        backupCount: 10
-        filters: [context]
+        when: midnight
+        backupCount: 3  # Does not include the current log file.
         encoding: utf8

+    # Default to buffering writes to log file for efficiency. This means that
+    # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
+    # logs will still be flushed immediately.
+    buffer:
+        class: logging.handlers.MemoryHandler
+        target: file
+        # The capacity is the number of log lines that are buffered before
+        # being written to disk. Increasing this will lead to better
+        # performance, at the expense of it taking longer for log lines to
+        # be written to disk.
+        capacity: 10
+        flushLevel: 30  # Flush for WARNING logs as well
+
+    # A handler that writes logs to stderr. Unused by default, but can be used
+    # instead of "buffer" and "file" in the logger handlers.
     console:
         class: logging.StreamHandler
         formatter: precise
-        filters: [context]

 loggers:
     synapse.storage.SQL:
@@ -80,9 +89,24 @@ loggers:
         # information such as access tokens.
         level: INFO

+    twisted:
+        # We send the twisted logging directly to the file handler,
+        # to work around https://github.com/matrix-org/synapse/issues/3471
+        # when using "buffer" logger. Use "console" to log to stderr instead.
+        handlers: [file]
+        propagate: false
+
 root:
     level: INFO
-    handlers: [file, console]
+
+    # Write logs to the `buffer` handler, which will buffer them together in memory,
+    # then write them to a file.
+    #
+    # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
+    # also need to update the configuration for the `twisted` logger above, in
+    # this case.)
+    #
+    handlers: [buffer]

 disable_existing_loggers: false
 """
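The buffer handler introduced here is the standard library's MemoryHandler; its behaviour can be reproduced in a few self-contained lines (plain stdlib, no Synapse code):

    import logging
    import logging.handlers

    file_handler = logging.FileHandler("homeserver.log")
    # Hold up to 10 records in memory; flush to the file when the buffer
    # fills or when a record at WARNING (level 30) or above arrives.
    buffer_handler = logging.handlers.MemoryHandler(
        capacity=10, flushLevel=logging.WARNING, target=file_handler
    )

    logger = logging.getLogger("demo")
    logger.addHandler(buffer_handler)
    logger.info("buffered until capacity or flushLevel is hit")
    logger.warning("flushes the buffer, including the INFO line above")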
@@ -168,11 +192,26 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):

         handler = logging.StreamHandler()
         handler.setFormatter(formatter)
-        handler.addFilter(LoggingContextFilter(request=""))
         logger.addHandler(handler)
     else:
         logging.config.dictConfig(log_config)

+    # We add a log record factory that runs all messages through the
+    # LoggingContextFilter so that we get the context *at the time we log*
+    # rather than when we write to a handler. This can be done in config using
+    # filter options, but care must be taken when using e.g. MemoryHandler to
+    # buffer writes.
+
+    log_filter = LoggingContextFilter(request="")
+    old_factory = logging.getLogRecordFactory()
+
+    def factory(*args, **kwargs):
+        record = old_factory(*args, **kwargs)
+        log_filter.filter(record)
+        return record
+
+    logging.setLogRecordFactory(factory)
+
     # Route Twisted's native logging through to the standard library logging
     # system.
     observer = STDLibLogObserver()
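The record-factory approach generalises beyond Synapse; a self-contained stdlib sketch (the request value is an invented stand-in for the logging context):

    import logging

    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):
        # Stamp each record at creation time, so the value is correct even if
        # a buffering handler only writes the record out much later.
        record = old_factory(*args, **kwargs)
        record.request = "GET-123"  # illustrative stand-in
        return record

    logging.setLogRecordFactory(factory)
    logging.basicConfig(format="%(request)s - %(message)s", level=logging.INFO)
    logging.getLogger(__name__).info("hello")  # prints "GET-123 - hello"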
@@ -93,6 +93,15 @@ class RatelimitConfig(Config):
         if rc_admin_redaction:
             self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)

+        self.rc_joins_local = RateLimitConfig(
+            config.get("rc_joins", {}).get("local", {}),
+            defaults={"per_second": 0.1, "burst_count": 3},
+        )
+        self.rc_joins_remote = RateLimitConfig(
+            config.get("rc_joins", {}).get("remote", {}),
+            defaults={"per_second": 0.01, "burst_count": 3},
+        )
+
     def generate_config_section(self, **kwargs):
         return """\
         ## Ratelimiting ##
@@ -118,6 +127,10 @@ class RatelimitConfig(Config):
         # - one for ratelimiting redactions by room admins. If this is not explicitly
         #   set then it uses the same ratelimiting as per rc_message. This is useful
         #   to allow room admins to deal with abuse quickly.
+        # - two for ratelimiting number of rooms a user can join, "local" for when
+        #   users are joining rooms the server is already in (this is cheap) vs
+        #   "remote" for when users are trying to join rooms not on the server (which
+        #   can be more expensive)
         #
         # The defaults are as shown below.
         #
@@ -143,6 +156,14 @@ class RatelimitConfig(Config):
        #rc_admin_redaction:
        #  per_second: 1
        #  burst_count: 50
+        #
+        #rc_joins:
+        #  local:
+        #    per_second: 0.1
+        #    burst_count: 3
+        #  remote:
+        #    per_second: 0.01
+        #    burst_count: 3


         # Ratelimiting settings for incoming federation
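Reading per_second as a sustained refill rate and burst_count as the bucket size, the rc_joins defaults behave like a token bucket: up to 3 local joins at once, then roughly one every 10 seconds. A toy model of those semantics (not Synapse's actual ratelimiter class):

    import time

    class TokenBucket:
        def __init__(self, per_second: float, burst_count: int):
            self.rate = per_second
            self.capacity = burst_count
            self.tokens = float(burst_count)
            self.last = time.monotonic()

        def allow(self) -> bool:
            # Refill proportionally to elapsed time, capped at the burst size.
            now = time.monotonic()
            self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

    local_joins = TokenBucket(per_second=0.1, burst_count=3)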
@@ -333,24 +333,6 @@ class RegistrationConfig(Config):
         #
         #default_identity_server: https://matrix.org

-        # The list of identity servers trusted to verify third party
-        # identifiers by this server.
-        #
-        # Also defines the ID server which will be called when an account is
-        # deactivated (one will be picked arbitrarily).
-        #
-        # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
-        # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
-        # background migration script, informing itself that the identity server all of its
-        # 3PIDs have been bound to is likely one of the below.
-        #
-        # As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
-        # it is now solely used for the purposes of the background migration script, and can be
-        # removed once it has run.
-        #trusted_third_party_id_servers:
-        #  - matrix.org
-        #  - vector.im

         # Handle threepid (email/phone etc) registration and password resets through a set of
         # *trusted* identity servers. Note that this allows the configured identity server to
         # reset passwords for accounts!
@@ -15,14 +15,15 @@
 # limitations under the License.

 import logging
+from typing import Any, List

-import jinja2
-import pkg_resources
+import attr

 from synapse.python_dependencies import DependencyException, check_requirements
 from synapse.util.module_loader import load_module, load_python_module

 from ._base import Config, ConfigError
+from ._util import validate_config

 logger = logging.getLogger(__name__)

@@ -80,6 +81,11 @@ class SAML2Config(Config):

         self.saml2_enabled = True

+        attribute_requirements = saml2_config.get("attribute_requirements") or []
+        self.attribute_requirements = _parse_attribute_requirements_def(
+            attribute_requirements
+        )
+
         self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
             "grandfathered_mxid_source_attribute", "uid"
         )
@@ -163,15 +169,9 @@ class SAML2Config(Config):
             saml2_config.get("saml_session_lifetime", "15m")
         )

-        template_dir = saml2_config.get("template_dir")
-        if not template_dir:
-            template_dir = pkg_resources.resource_filename("synapse", "res/templates",)
-
-        loader = jinja2.FileSystemLoader(template_dir)
-        # enable auto-escape here, to having to remember to escape manually in the
-        # template
-        env = jinja2.Environment(loader=loader, autoescape=True)
-        self.saml2_error_html_template = env.get_template("saml_error.html")
+        self.saml2_error_html_template = self.read_templates(
+            ["saml_error.html"], saml2_config.get("template_dir")
+        )

     def _default_saml_config_dict(
         self, required_attributes: set, optional_attributes: set
@@ -341,6 +341,17 @@ class SAML2Config(Config):
         #
         #grandfathered_mxid_source_attribute: upn

+        # It is possible to configure Synapse to only allow logins if SAML attributes
+        # match particular values. The requirements can be listed under
+        # `attribute_requirements` as shown below. All of the listed attributes must
+        # match for the login to be permitted.
+        #
+        #attribute_requirements:
+        #  - attribute: userGroup
+        #    value: "staff"
+        #  - attribute: department
+        #    value: "sales"

         # Directory in which Synapse will try to find the template files below.
         # If not set, default templates from within the Synapse package will be used.
         #
@@ -368,3 +379,34 @@ class SAML2Config(Config):
         """ % {
             "config_dir_path": config_dir_path
         }

+
+@attr.s(frozen=True)
+class SamlAttributeRequirement:
+    """Object describing a single requirement for SAML attributes."""
+
+    attribute = attr.ib(type=str)
+    value = attr.ib(type=str)
+
+    JSON_SCHEMA = {
+        "type": "object",
+        "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}},
+        "required": ["attribute", "value"],
+    }
+
+
+ATTRIBUTE_REQUIREMENTS_SCHEMA = {
+    "type": "array",
+    "items": SamlAttributeRequirement.JSON_SCHEMA,
+}
+
+
+def _parse_attribute_requirements_def(
+    attribute_requirements: Any,
+) -> List[SamlAttributeRequirement]:
+    validate_config(
+        ATTRIBUTE_REQUIREMENTS_SCHEMA,
+        attribute_requirements,
+        config_path=["saml2_config", "attribute_requirements"],
+    )
+    return [SamlAttributeRequirement(**x) for x in attribute_requirements]
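For illustration, what the new parsing does with the sample config shown in the hunk (the values are the sample's own):

    requirements = _parse_attribute_requirements_def(
        [
            {"attribute": "userGroup", "value": "staff"},
            {"attribute": "department", "value": "sales"},
        ]
    )
    # -> [SamlAttributeRequirement(attribute='userGroup', value='staff'), ...]
    # An entry missing "value" fails ATTRIBUTE_REQUIREMENTS_SCHEMA validation,
    # so validate_config raises ConfigError before any requirement is built.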
@@ -26,7 +26,6 @@ import yaml

 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.endpoint import parse_and_validate_server_name
-from synapse.python_dependencies import DependencyException, check_requirements

 from ._base import Config, ConfigError

@@ -439,6 +438,9 @@ class ServerConfig(Config):
             validator=attr.validators.instance_of(str),
             default=ROOM_COMPLEXITY_TOO_GREAT,
         )
+        admins_can_join = attr.ib(
+            validator=attr.validators.instance_of(bool), default=False
+        )

         self.limit_remote_rooms = LimitRemoteRoomsConfig(
             **(config.get("limit_remote_rooms") or {})
@@ -505,8 +507,6 @@ class ServerConfig(Config):
             )
         )

-        _check_resource_config(self.listeners)
-
         self.cleanup_extremities_with_dummy_events = config.get(
             "cleanup_extremities_with_dummy_events", True
         )
@@ -527,6 +527,21 @@ class ServerConfig(Config):
             "request_token_inhibit_3pid_errors", False,
         )

+        # List of users trialing the new experimental default push rules. This setting is
+        # not included in the sample configuration file on purpose as it's a temporary
+        # hack, so that some users can trial the new defaults without impacting every
+        # user on the homeserver.
+        users_new_default_push_rules = (
+            config.get("users_new_default_push_rules") or []
+        )  # type: list
+        if not isinstance(users_new_default_push_rules, list):
+            raise ConfigError("'users_new_default_push_rules' must be a list")
+
+        # Turn the list into a set to improve lookup speed.
+        self.users_new_default_push_rules = set(
+            users_new_default_push_rules
+        )  # type: set
+
     def has_tls_listener(self) -> bool:
         return any(listener.tls for listener in self.listeners)

@@ -893,6 +908,10 @@ class ServerConfig(Config):
         #
         #complexity_error: "This room is too complex."

+        # allow server admins to join complex rooms. Default is false.
+        #
+        #admins_can_join: true
+
         # Whether to require a user to be in the room to add an alias to it.
         # Defaults to 'true'.
         #
@@ -1111,20 +1130,3 @@ def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
         if name == "webclient":
             logger.warning(NO_MORE_WEB_CLIENT_WARNING)
             return

-
-def _check_resource_config(listeners: Iterable[ListenerConfig]) -> None:
-    resource_names = {
-        res_name
-        for listener in listeners
-        if listener.http_options
-        for res in listener.http_options.resources
-        for res_name in res.names
-    }
-
-    for resource in resource_names:
-        if resource == "consent":
-            try:
-                check_requirements("resources.consent")
-            except DependencyException as e:
-                raise ConfigError(e.message)
@@ -12,11 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 from typing import Any, Dict

-import pkg_resources
-
 from ._base import Config


@@ -29,22 +26,32 @@ class SSOConfig(Config):
     def read_config(self, config, **kwargs):
         sso_config = config.get("sso") or {}  # type: Dict[str, Any]

-        # Pick a template directory in order of:
-        # * The sso-specific template_dir
-        # * /path/to/synapse/install/res/templates
+        # The sso-specific template_dir
         template_dir = sso_config.get("template_dir")
-        if not template_dir:
-            template_dir = pkg_resources.resource_filename("synapse", "res/templates",)

-        self.sso_template_dir = template_dir
-        self.sso_account_deactivated_template = self.read_file(
-            os.path.join(self.sso_template_dir, "sso_account_deactivated.html"),
-            "sso_account_deactivated_template",
-        )
-        self.sso_auth_success_template = self.read_file(
-            os.path.join(self.sso_template_dir, "sso_auth_success.html"),
-            "sso_auth_success_template",
-        )
+        # Read templates from disk
+        (
+            self.sso_redirect_confirm_template,
+            self.sso_auth_confirm_template,
+            self.sso_error_template,
+            sso_account_deactivated_template,
+            sso_auth_success_template,
+        ) = self.read_templates(
+            [
+                "sso_redirect_confirm.html",
+                "sso_auth_confirm.html",
+                "sso_error.html",
+                "sso_account_deactivated.html",
+                "sso_auth_success.html",
+            ],
+            template_dir,
+        )
+
+        # These templates have no placeholders, so render them here
+        self.sso_account_deactivated_template = (
+            sso_account_deactivated_template.render()
+        )
+        self.sso_auth_success_template = sso_auth_success_template.render()

         self.sso_client_whitelist = sso_config.get("client_whitelist") or []
@@ -48,6 +48,14 @@ class ServerContextFactory(ContextFactory):
     connections."""

     def __init__(self, config):
+        # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version,
+        # switch to those (see https://github.com/pyca/cryptography/issues/5379).
+        #
+        # note that, despite the confusing name, SSLv23_METHOD does *not* enforce SSLv2
+        # or v3, but is a synonym for TLS_METHOD, which allows the client and server
+        # to negotiate an appropriate version of TLS constrained by the version options
+        # set with context.set_options.
+        #
         self._context = SSL.Context(SSL.SSLv23_METHOD)
         self.configure_context(self._context, config)
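A compact sketch of what the comment describes, using pyOpenSSL as it stood before TLS_METHOD was exposed (the option constants are real pyOpenSSL names):

    from OpenSSL import SSL

    # Despite the name, SSLv23_METHOD negotiates the best mutually supported
    # protocol version...
    context = SSL.Context(SSL.SSLv23_METHOD)
    # ...and set_options() is how old/insecure versions are excluded, e.g.:
    context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1)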
@@ -223,8 +223,7 @@ class Keyring(object):

         return results

-    @defer.inlineCallbacks
-    def _start_key_lookups(self, verify_requests):
+    async def _start_key_lookups(self, verify_requests):
         """Sets off the key fetches for each verify request

         Once each fetch completes, verify_request.key_ready will be resolved.
@@ -245,7 +244,7 @@ class Keyring(object):
             server_to_request_ids.setdefault(server_name, set()).add(request_id)

         # Wait for any previous lookups to complete before proceeding.
-        yield self.wait_for_previous_lookups(server_to_request_ids.keys())
+        await self.wait_for_previous_lookups(server_to_request_ids.keys())

         # take out a lock on each of the servers by sticking a Deferred in
         # key_downloads
@@ -283,15 +282,14 @@ class Keyring(object):
         except Exception:
             logger.exception("Error starting key lookups")

-    @defer.inlineCallbacks
-    def wait_for_previous_lookups(self, server_names):
+    async def wait_for_previous_lookups(self, server_names) -> None:
         """Waits for any previous key lookups for the given servers to finish.

         Args:
             server_names (Iterable[str]): list of servers which we want to look up

         Returns:
-            Deferred[None]: resolves once all key lookups for the given servers have
+            Resolves once all key lookups for the given servers have
                 completed. Follows the synapse rules of logcontext preservation.
         """
         loop_count = 1
@@ -309,7 +307,7 @@ class Keyring(object):
                 loop_count,
             )
             with PreserveLoggingContext():
-                yield defer.DeferredList((w[1] for w in wait_on))
+                await defer.DeferredList((w[1] for w in wait_on))

             loop_count += 1
@@ -326,44 +324,44 @@ class Keyring(object):

         remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}

-        @defer.inlineCallbacks
-        def do_iterations():
-            with Measure(self.clock, "get_server_verify_keys"):
-                for f in self._key_fetchers:
-                    if not remaining_requests:
-                        return
-                    yield self._attempt_key_fetches_with_fetcher(f, remaining_requests)
+        async def do_iterations():
+            try:
+                with Measure(self.clock, "get_server_verify_keys"):
+                    for f in self._key_fetchers:
+                        if not remaining_requests:
+                            return
+                        await self._attempt_key_fetches_with_fetcher(
+                            f, remaining_requests
+                        )

-                # look for any requests which weren't satisfied
-                with PreserveLoggingContext():
-                    for verify_request in remaining_requests:
-                        verify_request.key_ready.errback(
-                            SynapseError(
-                                401,
-                                "No key for %s with ids in %s (min_validity %i)"
-                                % (
-                                    verify_request.server_name,
-                                    verify_request.key_ids,
-                                    verify_request.minimum_valid_until_ts,
-                                ),
-                                Codes.UNAUTHORIZED,
-                            )
-                        )
+                    # look for any requests which weren't satisfied
+                    with PreserveLoggingContext():
+                        for verify_request in remaining_requests:
+                            verify_request.key_ready.errback(
+                                SynapseError(
+                                    401,
+                                    "No key for %s with ids in %s (min_validity %i)"
+                                    % (
+                                        verify_request.server_name,
+                                        verify_request.key_ids,
+                                        verify_request.minimum_valid_until_ts,
+                                    ),
+                                    Codes.UNAUTHORIZED,
+                                )
+                            )
+            except Exception as err:
+                # we don't really expect to get here, because any errors should already
+                # have been caught and logged. But if we do, let's log the error and make
+                # sure that all of the deferreds are resolved.
+                logger.error("Unexpected error in _get_server_verify_keys: %s", err)
+                with PreserveLoggingContext():
+                    for verify_request in remaining_requests:
+                        if not verify_request.key_ready.called:
+                            verify_request.key_ready.errback(err)

-        def on_err(err):
-            # we don't really expect to get here, because any errors should already
-            # have been caught and logged. But if we do, let's log the error and make
-            # sure that all of the deferreds are resolved.
-            logger.error("Unexpected error in _get_server_verify_keys: %s", err)
-            with PreserveLoggingContext():
-                for verify_request in remaining_requests:
-                    if not verify_request.key_ready.called:
-                        verify_request.key_ready.errback(err)
-
-        run_in_background(do_iterations).addErrback(on_err)
+        run_in_background(do_iterations)
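This hunk also changes how errors propagate: the old code attached on_err to the Deferred with .addErrback, while the async version catches exceptions inline. A minimal sketch of the two idioms (names invented):

    from twisted.internet import defer

    # Before: errors surface via the Deferred's errback chain.
    def start_old(work):
        d = defer.ensureDeferred(work())
        d.addErrback(lambda failure: print("failed:", failure))

    # After: the coroutine handles its own exceptions with try/except.
    async def start_new(work):
        try:
            await work()
        except Exception as err:
            print("failed:", err)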
-    @defer.inlineCallbacks
-    def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
+    async def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
         """Use a key fetcher to attempt to satisfy some key requests

         Args:
@@ -390,7 +388,7 @@ class Keyring(object):
                 verify_request.minimum_valid_until_ts,
             )

-        results = yield fetcher.get_keys(missing_keys)
+        results = await fetcher.get_keys(missing_keys)

         completed = []
         for verify_request in remaining_requests:
@@ -423,7 +421,7 @@ class Keyring(object):


 class KeyFetcher(object):
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """
         Args:
             keys_to_fetch (dict[str, dict[str, int]]):
@@ -442,8 +440,7 @@ class StoreKeyFetcher(KeyFetcher):
     def __init__(self, hs):
         self.store = hs.get_datastore()

-    @defer.inlineCallbacks
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """see KeyFetcher.get_keys"""

         keys_to_fetch = (
@@ -452,7 +449,7 @@ class StoreKeyFetcher(KeyFetcher):
             for key_id in keys_for_server.keys()
         )

-        res = yield self.store.get_server_verify_keys(keys_to_fetch)
+        res = await self.store.get_server_verify_keys(keys_to_fetch)
         keys = {}
         for (server_name, key_id), key in res.items():
             keys.setdefault(server_name, {})[key_id] = key
@@ -464,8 +461,7 @@ class BaseV2KeyFetcher(object):
         self.store = hs.get_datastore()
         self.config = hs.get_config()

-    @defer.inlineCallbacks
-    def process_v2_response(self, from_server, response_json, time_added_ms):
+    async def process_v2_response(self, from_server, response_json, time_added_ms):
         """Parse a 'Server Keys' structure from the result of a /key request

         This is used to parse either the entirety of the response from
@@ -537,7 +533,7 @@ class BaseV2KeyFetcher(object):

         key_json_bytes = encode_canonical_json(response_json)

-        yield make_deferred_yieldable(
+        await make_deferred_yieldable(
             defer.gatherResults(
                 [
                     run_in_background(
@@ -567,14 +563,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         self.client = hs.get_http_client()
         self.key_servers = self.config.key_servers

-    @defer.inlineCallbacks
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """see KeyFetcher.get_keys"""

-        @defer.inlineCallbacks
-        def get_key(key_server):
+        async def get_key(key_server):
             try:
-                result = yield self.get_server_verify_key_v2_indirect(
+                result = await self.get_server_verify_key_v2_indirect(
                     keys_to_fetch, key_server
                 )
                 return result
@@ -592,7 +586,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):

             return {}

-        results = yield make_deferred_yieldable(
+        results = await make_deferred_yieldable(
             defer.gatherResults(
                 [run_in_background(get_key, server) for server in self.key_servers],
                 consumeErrors=True,
@@ -606,8 +600,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):

         return union_of_keys

-    @defer.inlineCallbacks
-    def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
+    async def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
         """
         Args:
             keys_to_fetch (dict[str, dict[str, int]]):
@@ -617,7 +610,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                 the keys

         Returns:
-            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map
+            dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]: map
                 from server_name -> key_id -> FetchKeyResult

         Raises:
@@ -632,7 +625,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         )

         try:
-            query_response = yield self.client.post_json(
+            query_response = await self.client.post_json(
                 destination=perspective_name,
                 path="/_matrix/key/v2/query",
                 data={
@@ -668,7 +661,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             try:
                 self._validate_perspectives_response(key_server, response)

-                processed_response = yield self.process_v2_response(
+                processed_response = await self.process_v2_response(
                     perspective_name, response, time_added_ms=time_now_ms
                 )
             except KeyLookupError as e:
@@ -687,7 +680,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                 )
             keys.setdefault(server_name, {}).update(processed_response)

-        yield self.store.store_server_verify_keys(
+        await self.store.store_server_verify_keys(
             perspective_name, time_now_ms, added_keys
         )

@@ -739,24 +732,23 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
         self.clock = hs.get_clock()
         self.client = hs.get_http_client()

-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """
         Args:
             keys_to_fetch (dict[str, iterable[str]]):
                 the keys to be fetched. server_name -> key_ids

         Returns:
-            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]:
+            dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]:
                 map from server_name -> key_id -> FetchKeyResult
         """

         results = {}

-        @defer.inlineCallbacks
-        def get_key(key_to_fetch_item):
+        async def get_key(key_to_fetch_item):
             server_name, key_ids = key_to_fetch_item
             try:
-                keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids)
+                keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
                 results[server_name] = keys
             except KeyLookupError as e:
                 logger.warning(
@@ -765,12 +757,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
             except Exception:
                 logger.exception("Error getting keys %s from %s", key_ids, server_name)

-        return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback(
-            lambda _: results
-        )
+        return await yieldable_gather_results(
+            get_key, keys_to_fetch.items()
+        ).addCallback(lambda _: results)

-    @defer.inlineCallbacks
-    def get_server_verify_key_v2_direct(self, server_name, key_ids):
+    async def get_server_verify_key_v2_direct(self, server_name, key_ids):
         """

         Args:
@@ -792,7 +783,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):

         time_now_ms = self.clock.time_msec()
         try:
-            response = yield self.client.get_json(
+            response = await self.client.get_json(
                 destination=server_name,
                 path="/_matrix/key/v2/server/"
                 + urllib.parse.quote(requested_key_id),
@@ -823,12 +814,12 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
                 % (server_name, response["server_name"])
             )

-        response_keys = yield self.process_v2_response(
+        response_keys = await self.process_v2_response(
             from_server=server_name,
             response_json=response,
             time_added_ms=time_now_ms,
         )
-        yield self.store.store_server_verify_keys(
+        await self.store.store_server_verify_keys(
             server_name,
             time_now_ms,
             ((server_name, key_id, key) for key_id, key in response_keys.items()),
@@ -838,22 +829,18 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
         return keys


-@defer.inlineCallbacks
-def _handle_key_deferred(verify_request):
+async def _handle_key_deferred(verify_request) -> None:
     """Waits for the key to become available, and then performs a verification

     Args:
         verify_request (VerifyJsonRequest):

-    Returns:
-        Deferred[None]
-
     Raises:
         SynapseError if there was a problem performing the verification
     """
     server_name = verify_request.server_name
     with PreserveLoggingContext():
-        _, key_id, verify_key = yield verify_request.key_ready
+        _, key_id, verify_key = await verify_request.key_ready

     json_object = verify_request.json_object
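Several of these hunks share one fan-out idiom: start each sub-lookup with run_in_background (so it gets its own logcontext), gather the Deferreds, and await the gathered result through make_deferred_yieldable. A sketch of the idiom in isolation (the get_key body is supplied by the caller):

    from twisted.internet import defer
    from synapse.logging.context import make_deferred_yieldable, run_in_background

    async def fetch_all(servers, get_key):
        # Run lookups concurrently, one background logcontext each, then wait
        # for all of them without leaking the caller's logcontext.
        return await make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(get_key, s) for s in servers],
                consumeErrors=True,
            )
        )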
@@ -17,8 +17,7 @@ from typing import Optional
 import attr
 from nacl.signing import SigningKey

-from twisted.internet import defer
-
+from synapse.api.auth import Auth
 from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import (
@@ -29,6 +28,8 @@ from synapse.api.room_versions import (
 )
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
+from synapse.state import StateHandler
+from synapse.storage.databases.main import DataStore
 from synapse.types import EventID, JsonDict
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
@@ -44,45 +45,46 @@ class EventBuilder(object):

     Attributes:
         room_version: Version of the target room
-        room_id (str)
-        type (str)
-        sender (str)
-        content (dict)
-        unsigned (dict)
-        internal_metadata (_EventInternalMetadata)
+        room_id
+        type
+        sender
+        content
+        unsigned
+        internal_metadata

-        _state (StateHandler)
-        _auth (synapse.api.Auth)
-        _store (DataStore)
-        _clock (Clock)
-        _hostname (str): The hostname of the server creating the event
+        _state
+        _auth
+        _store
+        _clock
+        _hostname: The hostname of the server creating the event
         _signing_key: The signing key to use to sign the event as the server
     """

-    _state = attr.ib()
-    _auth = attr.ib()
-    _store = attr.ib()
-    _clock = attr.ib()
-    _hostname = attr.ib()
-    _signing_key = attr.ib()
+    _state = attr.ib(type=StateHandler)
+    _auth = attr.ib(type=Auth)
+    _store = attr.ib(type=DataStore)
+    _clock = attr.ib(type=Clock)
+    _hostname = attr.ib(type=str)
+    _signing_key = attr.ib(type=SigningKey)

     room_version = attr.ib(type=RoomVersion)

-    room_id = attr.ib()
-    type = attr.ib()
-    sender = attr.ib()
+    room_id = attr.ib(type=str)
+    type = attr.ib(type=str)
+    sender = attr.ib(type=str)

-    content = attr.ib(default=attr.Factory(dict))
-    unsigned = attr.ib(default=attr.Factory(dict))
+    content = attr.ib(default=attr.Factory(dict), type=JsonDict)
+    unsigned = attr.ib(default=attr.Factory(dict), type=JsonDict)

     # These only exist on a subset of events, so they raise AttributeError if
     # someone tries to get them when they don't exist.
-    _state_key = attr.ib(default=None)
-    _redacts = attr.ib(default=None)
-    _origin_server_ts = attr.ib(default=None)
+    _state_key = attr.ib(default=None, type=Optional[str])
+    _redacts = attr.ib(default=None, type=Optional[str])
+    _origin_server_ts = attr.ib(default=None, type=Optional[int])

     internal_metadata = attr.ib(
-        default=attr.Factory(lambda: _EventInternalMetadata({}))
+        default=attr.Factory(lambda: _EventInternalMetadata({})),
+        type=_EventInternalMetadata,
     )

     @property
@@ -95,31 +97,30 @@ class EventBuilder(object):
     def is_state(self):
         return self._state_key is not None

-    @defer.inlineCallbacks
-    def build(self, prev_event_ids):
+    async def build(self, prev_event_ids):
         """Transform into a fully signed and hashed event

         Args:
             prev_event_ids (list[str]): The event IDs to use as the prev events

         Returns:
-            Deferred[FrozenEvent]
+            FrozenEvent
         """

-        state_ids = yield defer.ensureDeferred(
-            self._state.get_current_state_ids(self.room_id, prev_event_ids)
+        state_ids = await self._state.get_current_state_ids(
+            self.room_id, prev_event_ids
         )
-        auth_ids = yield self._auth.compute_auth_events(self, state_ids)
+        auth_ids = self._auth.compute_auth_events(self, state_ids)

         format_version = self.room_version.event_format
         if format_version == EventFormatVersions.V1:
-            auth_events = yield self._store.add_event_hashes(auth_ids)
-            prev_events = yield self._store.add_event_hashes(prev_event_ids)
+            auth_events = await self._store.add_event_hashes(auth_ids)
+            prev_events = await self._store.add_event_hashes(prev_event_ids)
         else:
             auth_events = auth_ids
             prev_events = prev_event_ids

-        old_depth = yield self._store.get_max_depth_of(prev_event_ids)
+        old_depth = await self._store.get_max_depth_of(prev_event_ids)
         depth = old_depth + 1

         # we cap depth of generated events, to ensure that they are not
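The attrs changes here are declarative only: type= records the attribute type for static tooling, but attrs does not check it at runtime. A standalone sketch:

    from typing import Optional

    import attr

    @attr.s
    class Point:
        # type= is metadata (visible to e.g. the mypy attrs plugin);
        # it adds no runtime validation.
        x = attr.ib(type=int)
        y = attr.ib(type=int)
        label = attr.ib(default=None, type=Optional[str])

    Point(1, 2)        # fine
    Point("a", "b")    # also constructs: no runtime type check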
@@ -12,17 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union

 import attr
 from frozendict import frozendict

-from twisted.internet import defer
-
 from synapse.appservice import ApplicationService
+from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import StateMap

+if TYPE_CHECKING:
+    from synapse.storage.databases.main import DataStore
+

 @attr.s(slots=True)
 class EventContext:
@@ -129,8 +131,7 @@ class EventContext:
             delta_ids=delta_ids,
         )

-    @defer.inlineCallbacks
-    def serialize(self, event, store):
+    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`

@@ -146,7 +147,7 @@ class EventContext:
         # the prev_state_ids, so if we're a state event we include the event
         # id that we replaced in the state.
         if event.is_state():
-            prev_state_ids = yield self.get_prev_state_ids()
+            prev_state_ids = await self.get_prev_state_ids()
             prev_state_id = prev_state_ids.get((event.type, event.state_key))
         else:
             prev_state_id = None
@@ -214,8 +215,7 @@ class EventContext:

         return self._state_group

-    @defer.inlineCallbacks
-    def get_current_state_ids(self):
+    async def get_current_state_ids(self) -> Optional[StateMap[str]]:
         """
         Gets the room state map, including this event - ie, the state in ``state_group``

@@ -224,32 +224,31 @@ class EventContext:
         ``rejected`` is set.

         Returns:
-            Deferred[dict[(str, str), str]|None]: Returns None if state_group
-                is None, which happens when the associated event is an outlier.
+            Returns None if state_group is None, which happens when the associated
+            event is an outlier.

-                Maps a (type, state_key) to the event ID of the state event matching
-                this tuple.
+            Maps a (type, state_key) to the event ID of the state event matching
+            this tuple.
         """
         if self.rejected:
             raise RuntimeError("Attempt to access state_ids of rejected event")

-        yield self._ensure_fetched()
+        await self._ensure_fetched()
         return self._current_state_ids

-    @defer.inlineCallbacks
-    def get_prev_state_ids(self):
+    async def get_prev_state_ids(self):
         """
         Gets the room state map, excluding this event.

         For a non-state event, this will be the same as get_current_state_ids().

         Returns:
-            Deferred[dict[(str, str), str]|None]: Returns None if state_group
+            dict[(str, str), str]|None: Returns None if state_group
                 is None, which happens when the associated event is an outlier.
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
-        yield self._ensure_fetched()
+        await self._ensure_fetched()
         return self._prev_state_ids

     def get_cached_current_state_ids(self):
@@ -269,8 +268,8 @@ class EventContext:

         return self._current_state_ids

-    def _ensure_fetched(self):
-        return defer.succeed(None)
+    async def _ensure_fetched(self):
+        return None


 @attr.s(slots=True)
@@ -303,21 +302,20 @@ class _AsyncEventContextImpl(EventContext):
     _event_state_key = attr.ib(default=None)
     _fetching_state_deferred = attr.ib(default=None)

-    def _ensure_fetched(self):
+    async def _ensure_fetched(self):
         if not self._fetching_state_deferred:
             self._fetching_state_deferred = run_in_background(self._fill_out_state)

-        return make_deferred_yieldable(self._fetching_state_deferred)
+        return await make_deferred_yieldable(self._fetching_state_deferred)

-    @defer.inlineCallbacks
-    def _fill_out_state(self):
+    async def _fill_out_state(self):
         """Called to populate the _current_state_ids and _prev_state_ids
         attributes by loading from the database.
         """
         if self.state_group is None:
             return

-        self._current_state_ids = yield self._storage.state.get_state_ids_for_group(
+        self._current_state_ids = await self._storage.state.get_state_ids_for_group(
             self.state_group
         )
         if self._event_state_key is not None:
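_ensure_fetched in _AsyncEventContextImpl is a small single-flight cache: the first caller kicks off _fill_out_state in the background and stores the Deferred, and every subsequent caller awaits that same Deferred, so the state is loaded at most once. The idiom in isolation (class and names invented):

    from synapse.logging.context import make_deferred_yieldable, run_in_background

    class SingleFlight:
        def __init__(self, loader):
            self._loader = loader
            self._fetching = None  # Deferred for the in-flight (or finished) load

        async def get(self):
            # First caller starts the load; everyone else shares the Deferred.
            if not self._fetching:
                self._fetching = run_in_background(self._loader)
            return await make_deferred_yieldable(self._fetching)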
@@ -13,7 +13,9 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
 # limitations under the License.

-from twisted.internet import defer
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
+from synapse.types import Requester


 class ThirdPartyEventRules(object):
@@ -39,76 +41,79 @@ class ThirdPartyEventRules(object):
             config=config, http_client=hs.get_simple_http_client()
         )

-    @defer.inlineCallbacks
-    def check_event_allowed(self, event, context):
+    async def check_event_allowed(
+        self, event: EventBase, context: EventContext
+    ) -> bool:
         """Check if a provided event should be allowed in the given context.

         Args:
-            event (synapse.events.EventBase): The event to be checked.
-            context (synapse.events.snapshot.EventContext): The context of the event.
+            event: The event to be checked.
+            context: The context of the event.

         Returns:
-            defer.Deferred[bool]: True if the event should be allowed, False if not.
+            True if the event should be allowed, False if not.
         """
         if self.third_party_rules is None:
             return True

-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = await context.get_prev_state_ids()

         # Retrieve the state events from the database.
         state_events = {}
         for key, event_id in prev_state_ids.items():
-            state_events[key] = yield self.store.get_event(event_id, allow_none=True)
+            state_events[key] = await self.store.get_event(event_id, allow_none=True)

-        ret = yield self.third_party_rules.check_event_allowed(event, state_events)
+        ret = await self.third_party_rules.check_event_allowed(event, state_events)
         return ret

-    @defer.inlineCallbacks
-    def on_create_room(self, requester, config, is_requester_admin):
+    async def on_create_room(
+        self, requester: Requester, config: dict, is_requester_admin: bool
+    ) -> bool:
         """Intercept requests to create room to allow, deny or update the
         request config.

         Args:
-            requester (Requester)
-            config (dict): The creation config from the client.
-            is_requester_admin (bool): If the requester is an admin
+            requester
+            config: The creation config from the client.
+            is_requester_admin: If the requester is an admin

         Returns:
-            defer.Deferred[bool]: Whether room creation is allowed or denied.
+            Whether room creation is allowed or denied.
         """

         if self.third_party_rules is None:
             return True

-        ret = yield self.third_party_rules.on_create_room(
+        ret = await self.third_party_rules.on_create_room(
             requester, config, is_requester_admin
         )
         return ret

-    @defer.inlineCallbacks
-    def check_threepid_can_be_invited(self, medium, address, room_id):
+    async def check_threepid_can_be_invited(
+        self, medium: str, address: str, room_id: str
+    ) -> bool:
         """Check if a provided 3PID can be invited in the given room.

         Args:
-            medium (str): The 3PID's medium.
-            address (str): The 3PID's address.
-            room_id (str): The room we want to invite the threepid to.
+            medium: The 3PID's medium.
+            address: The 3PID's address.
+            room_id: The room we want to invite the threepid to.

         Returns:
-            defer.Deferred[bool], True if the 3PID can be invited, False if not.
+            True if the 3PID can be invited, False if not.
         """

         if self.third_party_rules is None:
             return True

-        state_ids = yield self.store.get_filtered_current_state_ids(room_id)
-        room_state_events = yield self.store.get_events(state_ids.values())
+        state_ids = await self.store.get_filtered_current_state_ids(room_id)
+        room_state_events = await self.store.get_events(state_ids.values())

         state_events = {}
         for key, event_id in state_ids.items():
             state_events[key] = room_state_events[event_id]

-        ret = yield self.third_party_rules.check_threepid_can_be_invited(
+        ret = await self.third_party_rules.check_threepid_can_be_invited(
             medium, address, state_events
         )
         return ret

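These hunks convert the third-party event rules wrapper to native coroutines, so loaded rules modules are awaited directly. A minimal sketch of a module shaped to this interface — the class name, config keys and the rules themselves are illustrative, not part of this changeset:

```python
# Hypothetical third-party rules module matching the async interface above.
class ExampleRulesModule:
    def __init__(self, config, http_client):
        self._config = config
        self._http_client = http_client

    @staticmethod
    def parse_config(config):
        return config

    async def check_event_allowed(self, event, state_events):
        # Reject event types listed under a (made-up) "banned_event_types" key.
        return event.type not in self._config.get("banned_event_types", ())

    async def on_create_room(self, requester, config, is_requester_admin):
        # Example policy: only server admins may create rooms.
        return is_requester_admin

    async def check_threepid_can_be_invited(self, medium, address, state_events):
        # Example policy: only email third-party invites are allowed.
        return medium == "email"
```
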
@@ -18,8 +18,6 @@ from typing import Any, Mapping, Union

 from frozendict import frozendict

-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, RelationTypes
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
@@ -337,8 +335,9 @@ class EventClientSerializer(object):
             hs.config.experimental_msc1849_support_enabled
         )

-    @defer.inlineCallbacks
-    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
+    async def serialize_event(
+        self, event, time_now, bundle_aggregations=True, **kwargs
+    ):
         """Serializes a single event.

         Args:
@@ -348,7 +347,7 @@ class EventClientSerializer(object):
             **kwargs: Arguments to pass to `serialize_event`

         Returns:
-            Deferred[dict]: The serialized event
+            dict: The serialized event
         """
         # To handle the case of presence events and the like
         if not isinstance(event, EventBase):
@@ -363,8 +362,8 @@ class EventClientSerializer(object):
         if not event.internal_metadata.is_redacted() and (
             self.experimental_msc1849_support_enabled and bundle_aggregations
         ):
-            annotations = yield self.store.get_aggregation_groups_for_event(event_id)
-            references = yield self.store.get_relations_for_event(
+            annotations = await self.store.get_aggregation_groups_for_event(event_id)
+            references = await self.store.get_relations_for_event(
                 event_id, RelationTypes.REFERENCE, direction="f"
             )
@@ -378,7 +377,7 @@ class EventClientSerializer(object):

         edit = None
         if event.type == EventTypes.Message:
-            edit = yield self.store.get_applicable_edit(event_id)
+            edit = await self.store.get_applicable_edit(event_id)

         if edit:
             # If there is an edit replace the content, preserving existing

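With `serialize_event` now a coroutine, callers serialize batches by awaiting each event in turn; a small usage sketch, where the `serializer` and `events` objects are assumed to come from the surrounding application:

```python
import time


async def serialize_all(serializer, events):
    # Timestamps are milliseconds since the epoch throughout Synapse.
    time_now = int(time.time() * 1000)
    return [
        await serializer.serialize_event(ev, time_now, bundle_aggregations=True)
        for ev in events
    ]
```
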
@@ -135,7 +135,7 @@ class FederationClient(FederationBase):
             and try the request anyway.

         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels(query_type).inc()
@@ -157,7 +157,7 @@ class FederationClient(FederationBase):
             content (dict): The query content.

         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels("client_device_keys").inc()
@@ -180,7 +180,7 @@ class FederationClient(FederationBase):
             content (dict): The query content.

         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels("client_one_time_keys").inc()
@@ -900,7 +900,7 @@ class FederationClient(FederationBase):
             party instance

         Returns:
-            Deferred[Dict[str, Any]]: The response from the remote server, or None if
+            Awaitable[Dict[str, Any]]: The response from the remote server, or None if
             `remote_server` is the same as the local server_name

         Raises:

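The docstrings now advertise awaitables rather than Deferreds. Until every caller is converted, generator-based code can still bridge into these methods with `defer.ensureDeferred`; a sketch of that bridge (the `federation_client` argument and the exact method signature are assumptions drawn from the surrounding hunks):

```python
from twisted.internet import defer


@defer.inlineCallbacks
def legacy_caller(federation_client, destination, query):
    # Wrap the coroutine in a Deferred so the generator can yield on it.
    result = yield defer.ensureDeferred(
        federation_client.query_client_keys(destination, query, timeout=10000)
    )
    return result
```
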
@@ -37,8 +37,8 @@ from sortedcontainers import SortedDict

 from twisted.internet import defer

+from synapse.api.presence import UserPresenceState
 from synapse.metrics import LaterGauge
-from synapse.storage.presence import UserPresenceState
 from synapse.util.metrics import Measure

 from .units import Edu

@@ -22,6 +22,7 @@ from twisted.internet import defer

 import synapse
 import synapse.metrics
+from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase
 from synapse.federation.sender.per_destination_queue import PerDestinationQueue
 from synapse.federation.sender.transaction_manager import TransactionManager
@@ -39,7 +40,6 @@ from synapse.metrics import (
     events_processed_counter,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage.presence import UserPresenceState
 from synapse.types import ReadReceipt
 from synapse.util.metrics import Measure, measure_func
@@ -288,8 +288,7 @@ class FederationSender(object):
         for destination in destinations:
             self._get_per_destination_queue(destination).send_pdu(pdu, order)

-    @defer.inlineCallbacks
-    def send_read_receipt(self, receipt: ReadReceipt):
+    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
         """Send a RR to any other servers in the room

         Args:
@@ -330,9 +329,7 @@ class FederationSender(object):
         room_id = receipt.room_id

         # Work out which remote servers should be poked and poke them.
-        domains = yield defer.ensureDeferred(
-            self.state.get_current_hosts_in_room(room_id)
-        )
+        domains = await self.state.get_current_hosts_in_room(room_id)
         domains = [
             d
             for d in domains
@@ -387,8 +384,7 @@ class FederationSender(object):
             queue.flush_read_receipts_for_room(room_id)

     @preserve_fn  # the caller should not yield on this
-    @defer.inlineCallbacks
-    def send_presence(self, states: List[UserPresenceState]):
+    async def send_presence(self, states: List[UserPresenceState]):
         """Send the new presence states to the appropriate destinations.

         This actually queues up the presence states ready for sending and
@@ -423,7 +419,7 @@ class FederationSender(object):
                 if not states_map:
                     break

-                yield self._process_presence_inner(list(states_map.values()))
+                await self._process_presence_inner(list(states_map.values()))
             except Exception:
                 logger.exception("Error sending presence states to servers")
             finally:
@@ -450,14 +446,11 @@ class FederationSender(object):
             self._get_per_destination_queue(destination).send_presence(states)

     @measure_func("txnqueue._process_presence")
-    @defer.inlineCallbacks
-    def _process_presence_inner(self, states: List[UserPresenceState]):
+    async def _process_presence_inner(self, states: List[UserPresenceState]):
         """Given a list of states populate self.pending_presence_by_dest and
         poke to send a new transaction to each destination
         """
-        hosts_and_states = yield defer.ensureDeferred(
-            get_interested_remotes(self.store, states, self.state)
-        )
+        hosts_and_states = await get_interested_remotes(self.store, states, self.state)

         for destinations, states in hosts_and_states:
             for destination in destinations:

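The pattern above — `yield defer.ensureDeferred(coro)` collapsing to `await coro` — recurs throughout this changeset. In miniature, using a stand-in coroutine rather than any real Synapse helper:

```python
from twisted.internet import defer


async def fetch_hosts(room_id):  # stand-in for a real async helper
    return ["example.org"]


# Before: a generator bridging into a coroutine via ensureDeferred.
@defer.inlineCallbacks
def poke_hosts_before(room_id):
    hosts = yield defer.ensureDeferred(fetch_hosts(room_id))
    return len(hosts)


# After: a native coroutine awaiting the helper directly.
async def poke_hosts_after(room_id):
    hosts = await fetch_hosts(room_id)
    return len(hosts)
```
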
@@ -24,12 +24,12 @@ from synapse.api.errors import (
     HttpResponseException,
     RequestSendFailed,
 )
+from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase
 from synapse.federation.units import Edu
 from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage.presence import UserPresenceState
 from synapse.types import ReadReceipt
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
@@ -337,6 +337,28 @@ class PerDestinationQueue(object):
                     (e.retry_last_ts + e.retry_interval) / 1000.0
                 ),
             )
+
+            if e.retry_interval > 60 * 60 * 1000:
+                # we won't retry for another hour!
+                # (this suggests a significant outage)
+                # We drop pending PDUs and EDUs because otherwise they will
+                # rack up indefinitely.
+                # Note that:
+                # - the EDUs that are being dropped here are those that we can
+                #   afford to drop (specifically, only typing notifications,
+                #   read receipts and presence updates are being dropped here)
+                # - Other EDUs such as to_device messages are queued with a
+                #   different mechanism
+                # - this is all volatile state that would be lost if the
+                #   federation sender restarted anyway
+
+                # dropping read receipts is a bit sad but should be solved
+                # through another mechanism, because this is all volatile!
+                self._pending_pdus = []
+                self._pending_edus = []
+                self._pending_edus_keyed = {}
+                self._pending_presence = {}
+                self._pending_rrs = {}
         except FederationDeniedError as e:
             logger.info(e)
         except HttpResponseException as e:

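The new branch drops queued-but-volatile traffic once a destination's backoff passes an hour. The threshold arithmetic in isolation, with illustrative values:

```python
# retry_interval is in milliseconds, so the cutoff is one hour.
ONE_HOUR_MS = 60 * 60 * 1000


def should_drop_pending(retry_interval_ms: int) -> bool:
    return retry_interval_ms > ONE_HOUR_MS


assert should_drop_pending(2 * ONE_HOUR_MS)
assert not should_drop_pending(30 * 60 * 1000)  # 30 minutes: keep queuing
```
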
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Tuple

 from canonicaljson import json
@@ -54,7 +54,10 @@ class TransactionManager(object):

     @measure_func("_send_new_transaction")
     async def send_new_transaction(
-        self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
+        self,
+        destination: str,
+        pending_pdus: List[Tuple[EventBase, int]],
+        pending_edus: List[Edu],
     ):

         # Make a transaction-sending opentracing span. This span follows on from

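Each pending PDU now travels with its stream ordering, so callers build and unpack `(event, order)` pairs. A sketch of what the new parameter type implies — the `EventBase` stand-in below is only for illustration:

```python
from typing import List, Tuple


class EventBase:  # stand-in for synapse.events.EventBase
    pass


def order_pdus(pending_pdus: List[Tuple[EventBase, int]]) -> List[EventBase]:
    # Unpack the (event, stream_ordering) pairs and send oldest first.
    return [event for event, _order in sorted(pending_pdus, key=lambda p: p[1])]
```
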
@@ -18,8 +18,6 @@ import logging
 import urllib
 from typing import Any, Dict, Optional

-from twisted.internet import defer
-
 from synapse.api.constants import Membership
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
 from synapse.api.urls import (
@@ -51,7 +49,7 @@ class TransportLayerClient(object):
             event_id (str): The event we want the context at.

         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug("get_room_state_ids dest=%s, room=%s", destination, room_id)
@@ -75,7 +73,7 @@ class TransportLayerClient(object):
                 giving up. None indicates no timeout.

         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id)
@@ -96,7 +94,7 @@ class TransportLayerClient(object):
             limit (int)

         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug(
             "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
@@ -118,16 +116,15 @@ class TransportLayerClient(object):
             destination, path=path, args=args, try_trailing_slash_on_400=True
         )

-    @defer.inlineCallbacks
     @log_function
-    def send_transaction(self, transaction, json_data_callback=None):
+    async def send_transaction(self, transaction, json_data_callback=None):
         """ Sends the given Transaction to its destination

         Args:
             transaction (Transaction)

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body.

         Fails with ``HTTPRequestException`` if we get an HTTP response
@@ -154,7 +151,7 @@ class TransportLayerClient(object):

         path = _create_v1_path("/send/%s", transaction.transaction_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             transaction.destination,
             path=path,
             data=json_data,
@@ -166,14 +163,13 @@ class TransportLayerClient(object):

         return response

-    @defer.inlineCallbacks
     @log_function
-    def make_query(
+    async def make_query(
         self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False
     ):
         path = _create_v1_path("/query/%s", query_type)

-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination,
             path=path,
             args=args,
@@ -184,9 +180,10 @@ class TransportLayerClient(object):

         return content

-    @defer.inlineCallbacks
     @log_function
-    def make_membership_event(self, destination, room_id, user_id, membership, params):
+    async def make_membership_event(
+        self, destination, room_id, user_id, membership, params
+    ):
         """Asks a remote server to build and sign us a membership event

         Note that this does not append any events to any graphs.
@@ -200,7 +197,7 @@ class TransportLayerClient(object):
                 request.

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body (ie, the new event).

         Fails with ``HTTPRequestException`` if we get an HTTP response
@@ -231,7 +228,7 @@ class TransportLayerClient(object):
             ignore_backoff = True
             retry_on_dns_fail = True

-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination,
             path=path,
             args=params,
@@ -242,34 +239,31 @@ class TransportLayerClient(object):

         return content

-    @defer.inlineCallbacks
     @log_function
-    def send_join_v1(self, destination, room_id, event_id, content):
+    async def send_join_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content
         )

         return response

-    @defer.inlineCallbacks
     @log_function
-    def send_join_v2(self, destination, room_id, event_id, content):
+    async def send_join_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/send_join/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content
         )

         return response

-    @defer.inlineCallbacks
     @log_function
-    def send_leave_v1(self, destination, room_id, event_id, content):
+    async def send_leave_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination,
             path=path,
             data=content,
@@ -282,12 +276,11 @@ class TransportLayerClient(object):

         return response

-    @defer.inlineCallbacks
     @log_function
-    def send_leave_v2(self, destination, room_id, event_id, content):
+    async def send_leave_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/send_leave/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination,
             path=path,
             data=content,
@@ -300,31 +293,28 @@ class TransportLayerClient(object):

         return response

-    @defer.inlineCallbacks
     @log_function
-    def send_invite_v1(self, destination, room_id, event_id, content):
+    async def send_invite_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/invite/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content, ignore_backoff=True
         )

         return response

-    @defer.inlineCallbacks
     @log_function
-    def send_invite_v2(self, destination, room_id, event_id, content):
+    async def send_invite_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/invite/%s/%s", room_id, event_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content, ignore_backoff=True
         )

         return response

-    @defer.inlineCallbacks
     @log_function
-    def get_public_rooms(
+    async def get_public_rooms(
         self,
         remote_server: str,
         limit: Optional[int] = None,
@@ -355,7 +345,7 @@ class TransportLayerClient(object):
             data["filter"] = search_filter

         try:
-            response = yield self.client.post_json(
+            response = await self.client.post_json(
                 destination=remote_server, path=path, data=data, ignore_backoff=True
             )
         except HttpResponseException as e:
@@ -381,7 +371,7 @@ class TransportLayerClient(object):
             args["since"] = [since_token]

         try:
-            response = yield self.client.get_json(
+            response = await self.client.get_json(
                 destination=remote_server, path=path, args=args, ignore_backoff=True
             )
         except HttpResponseException as e:
@@ -396,29 +386,26 @@ class TransportLayerClient(object):

         return response

-    @defer.inlineCallbacks
     @log_function
-    def exchange_third_party_invite(self, destination, room_id, event_dict):
+    async def exchange_third_party_invite(self, destination, room_id, event_dict):
         path = _create_v1_path("/exchange_third_party_invite/%s", room_id)

-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=event_dict
         )

         return response

-    @defer.inlineCallbacks
     @log_function
-    def get_event_auth(self, destination, room_id, event_id):
+    async def get_event_auth(self, destination, room_id, event_id):
         path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)

-        content = yield self.client.get_json(destination=destination, path=path)
+        content = await self.client.get_json(destination=destination, path=path)

         return content

-    @defer.inlineCallbacks
     @log_function
-    def query_client_keys(self, destination, query_content, timeout):
+    async def query_client_keys(self, destination, query_content, timeout):
         """Query the device keys for a list of user ids hosted on a remote
         server.
@@ -453,14 +440,13 @@ class TransportLayerClient(object):
         """
         path = _create_v1_path("/user/keys/query")

-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
         return content

-    @defer.inlineCallbacks
     @log_function
-    def query_user_devices(self, destination, user_id, timeout):
+    async def query_user_devices(self, destination, user_id, timeout):
         """Query the devices for a user id hosted on a remote server.

         Response:
@@ -493,14 +479,13 @@ class TransportLayerClient(object):
         """
         path = _create_v1_path("/user/devices/%s", user_id)

-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination, path=path, timeout=timeout
         )
         return content

-    @defer.inlineCallbacks
     @log_function
-    def claim_client_keys(self, destination, query_content, timeout):
+    async def claim_client_keys(self, destination, query_content, timeout):
         """Claim one-time keys for a list of devices hosted on a remote server.

         Request:
@@ -532,14 +517,13 @@ class TransportLayerClient(object):

         path = _create_v1_path("/user/keys/claim")

-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
         return content

-    @defer.inlineCallbacks
     @log_function
-    def get_missing_events(
+    async def get_missing_events(
         self,
         destination,
         room_id,
@@ -551,7 +535,7 @@ class TransportLayerClient(object):
     ):
         path = _create_v1_path("/get_missing_events/%s", room_id)

-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination,
             path=path,
             data={

@@ -41,8 +41,6 @@ from typing import Tuple

 from signedjson.sign import sign_json

-from twisted.internet import defer
-
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
@@ -72,8 +70,9 @@ class GroupAttestationSigning(object):
         self.server_name = hs.hostname
         self.signing_key = hs.signing_key

-    @defer.inlineCallbacks
-    def verify_attestation(self, attestation, group_id, user_id, server_name=None):
+    async def verify_attestation(
+        self, attestation, group_id, user_id, server_name=None
+    ):
         """Verifies that the given attestation matches the given parameters.

         An optional server_name can be supplied to explicitly set which server's
@@ -102,7 +101,7 @@ class GroupAttestationSigning(object):
         if valid_until_ms < now:
             raise SynapseError(400, "Attestation expired")

-        yield self.keyring.verify_json_for_server(
+        await self.keyring.verify_json_for_server(
             server_name, attestation, now, "Group attestation"
         )
@@ -142,8 +141,7 @@ class GroupAttestionRenewer(object):
             self._start_renew_attestations, 30 * 60 * 1000
         )

-    @defer.inlineCallbacks
-    def on_renew_attestation(self, group_id, user_id, content):
+    async def on_renew_attestation(self, group_id, user_id, content):
         """When a remote updates an attestation
         """
         attestation = content["attestation"]
@@ -151,11 +149,11 @@ class GroupAttestionRenewer(object):
         if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
             raise SynapseError(400, "Neither user not group are on this server")

-        yield self.attestations.verify_attestation(
+        await self.attestations.verify_attestation(
             attestation, user_id=user_id, group_id=group_id
         )

-        yield self.store.update_remote_attestion(group_id, user_id, attestation)
+        await self.store.update_remote_attestion(group_id, user_id, attestation)

         return {}
@@ -172,8 +170,7 @@ class GroupAttestionRenewer(object):
                 now + UPDATE_ATTESTATION_TIME_MS
             )

-        @defer.inlineCallbacks
-        def _renew_attestation(group_user: Tuple[str, str]):
+        async def _renew_attestation(group_user: Tuple[str, str]):
             group_id, user_id = group_user
             try:
                 if not self.is_mine_id(group_id):
@@ -186,16 +183,16 @@ class GroupAttestionRenewer(object):
                         user_id,
                         group_id,
                     )
-                    yield self.store.remove_attestation_renewal(group_id, user_id)
+                    await self.store.remove_attestation_renewal(group_id, user_id)
                     return

                 attestation = self.attestations.create_attestation(group_id, user_id)

-                yield self.transport_client.renew_group_attestation(
+                await self.transport_client.renew_group_attestation(
                     destination, group_id, user_id, content={"attestation": attestation}
                 )

-                yield self.store.update_attestation_renewal(
+                await self.store.update_attestation_renewal(
                     group_id, user_id, attestation
                 )
             except (RequestSendFailed, HttpResponseException) as e:

@@ -719,6 +719,27 @@ class GroupsServerHandler(GroupsServerWorkerHandler):

         raise NotImplementedError()

+    async def change_user_admin_in_group(
+        self, group_id, user_id, want_admin, requester_user_id, content
+    ):
+        """Promotes or demotes a user in a group.
+        """
+
+        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+        if requester_user_id == user_id:
+            raise SynapseError(400, "User cannot target themselves")
+
+        is_admin = await self.store.is_user_admin_in_group(
+            group_id, requester_user_id
+        )
+        if not is_admin:
+            raise SynapseError(403, "User is not admin in group")
+
+        await self.store.change_user_admin_in_group(group_id, user_id, want_admin)
+
+        return {}
+
     async def remove_user_from_group(
         self, group_id, user_id, requester_user_id, content
     ):

@@ -26,11 +26,6 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID
 from synapse.util import stringutils

-try:
-    from synapse.push.mailer import load_jinja2_templates
-except ImportError:
-    load_jinja2_templates = None
-
 logger = logging.getLogger(__name__)


@@ -47,9 +42,11 @@ class AccountValidityHandler(object):
         if (
             self._account_validity.enabled
             and self._account_validity.renew_by_email_enabled
-            and load_jinja2_templates
         ):
+            # Don't do email-specific configuration if renewal by email is disabled.
+            self._template_html = self.config.account_validity_template_html
+            self._template_text = self.config.account_validity_template_text

             try:
                 app_name = self.hs.config.email_app_name
@@ -65,17 +62,6 @@ class AccountValidityHandler(object):

             self._raw_from = email.utils.parseaddr(self._from_string)[1]

-            self._template_html, self._template_text = load_jinja2_templates(
-                self.config.email_template_dir,
-                [
-                    self.config.email_expiry_template_html,
-                    self.config.email_expiry_template_text,
-                ],
-                apply_format_ts_filter=True,
-                apply_mxc_to_http_filter=True,
-                public_baseurl=self.config.public_baseurl,
-            )
-
             # Check the renewal emails to send and send them every 30min.
             def send_emails():
                 # run as a background process to make sure that the database transactions

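Template loading moves out of the handler: instead of calling `load_jinja2_templates` lazily (and tolerating its absence), the templates are read once at config-parse time and exposed as config attributes. Roughly what that loading amounts to — the helper name is illustrative, though the filenames match the default expiry templates:

```python
import jinja2


def load_account_validity_templates(template_dir: str):
    # Load both renderings of the renewal email once, up front.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    return (
        env.get_template("notice_expiry.html"),
        env.get_template("notice_expiry.txt"),
    )
```
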
@@ -17,7 +17,6 @@ import logging

 import twisted
 import twisted.internet.error
-from twisted.internet import defer
 from twisted.web import server, static
 from twisted.web.resource import Resource
@@ -41,8 +40,7 @@ class AcmeHandler(object):
         self.reactor = hs.get_reactor()
         self._acme_domain = hs.config.acme_domain

-    @defer.inlineCallbacks
-    def start_listening(self):
+    async def start_listening(self):
         from synapse.handlers import acme_issuing_service

         # Configure logging for txacme, if you need to debug
@@ -82,18 +80,17 @@ class AcmeHandler(object):
         self._issuer._registered = False

         try:
-            yield self._issuer._ensure_registered()
+            await self._issuer._ensure_registered()
         except Exception:
             logger.error(ACME_REGISTER_FAIL_ERROR)
             raise

-    @defer.inlineCallbacks
-    def provision_certificate(self):
+    async def provision_certificate(self):

         logger.warning("Reprovisioning %s", self._acme_domain)

         try:
-            yield self._issuer.issue_cert(self._acme_domain)
+            await self._issuer.issue_cert(self._acme_domain)
         except Exception:
             logger.exception("Fail!")
             raise

@@ -27,7 +27,6 @@ from synapse.metrics import (
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util import log_failure
 from synapse.util.metrics import Measure

 logger = logging.getLogger(__name__)
@@ -100,10 +99,11 @@ class ApplicationServicesHandler(object):

                 if not self.started_scheduler:

-                    def start_scheduler():
-                        return self.scheduler.start().addErrback(
-                            log_failure, "Application Services Failure"
-                        )
+                    async def start_scheduler():
+                        try:
+                            return await self.scheduler.start()
+                        except Exception:
+                            logger.error("Application Services Failure")

                     run_as_background_process("as_scheduler", start_scheduler)
                     self.started_scheduler = True

@@ -42,7 +42,6 @@ from synapse.http.site import SynapseRequest
 from synapse.logging.context import defer_to_thread
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.module_api import ModuleApi
-from synapse.push.mailer import load_jinja2_templates
 from synapse.types import Requester, UserID
 from synapse.util import stringutils as stringutils
 from synapse.util.threepids import canonicalise_email
@@ -132,18 +131,17 @@ class AuthHandler(BaseHandler):
         # after the SSO completes and before redirecting them back to their client.
         # It notifies the user they are about to give access to their matrix account
         # to the client.
-        self._sso_redirect_confirm_template = load_jinja2_templates(
-            hs.config.sso_template_dir, ["sso_redirect_confirm.html"],
-        )[0]
+        self._sso_redirect_confirm_template = hs.config.sso_redirect_confirm_template

         # The following template is shown during user interactive authentication
         # in the fallback auth scenario. It notifies the user that they are
         # authenticating for an operation to occur on their account.
-        self._sso_auth_confirm_template = load_jinja2_templates(
-            hs.config.sso_template_dir, ["sso_auth_confirm.html"],
-        )[0]
+        self._sso_auth_confirm_template = hs.config.sso_auth_confirm_template

         # The following template is shown after a successful user interactive
         # authentication session. It tells the user they can close the window.
         self._sso_auth_success_template = hs.config.sso_auth_success_template

         # The following template is shown during the SSO authentication process if
         # the account is deactivated.
         self._sso_account_deactivated_template = (
@@ -162,7 +160,7 @@ class AuthHandler(BaseHandler):
         request_body: Dict[str, Any],
         clientip: str,
         description: str,
-    ) -> dict:
+    ) -> Tuple[dict, str]:
         """
         Checks that the user is who they claim to be, via a UI auth.
@@ -183,9 +181,14 @@ class AuthHandler(BaseHandler):
             describes the operation happening on their account.

         Returns:
-            The parameters for this request (which may
+            A tuple of (params, session_id).
+
+            'params' contains the parameters for this request (which may
             have been given only in a previous call).

+            'session_id' is the ID of this session, either passed in by the
+            client or assigned by this call
+
         Raises:
             InteractiveAuthIncompleteError if the client has not yet completed
             any of the permitted login flows
@@ -207,7 +210,7 @@ class AuthHandler(BaseHandler):
         flows = [[login_type] for login_type in self._supported_ui_auth_types]

         try:
-            result, params, _ = await self.check_auth(
+            result, params, session_id = await self.check_ui_auth(
                 flows, request, request_body, clientip, description
             )
         except LoginError:
@@ -230,7 +233,7 @@ class AuthHandler(BaseHandler):
         if user_id != requester.user.to_string():
             raise AuthError(403, "Invalid auth")

-        return params
+        return params, session_id

     def get_enabled_auth_types(self):
         """Return the enabled user-interactive authentication types
@@ -240,7 +243,7 @@ class AuthHandler(BaseHandler):
         """
         return self.checkers.keys()

-    async def check_auth(
+    async def check_ui_auth(
         self,
         flows: List[List[str]],
         request: SynapseRequest,
@@ -363,7 +366,7 @@ class AuthHandler(BaseHandler):

         if not authdict:
             raise InteractiveAuthIncompleteError(
-                self._auth_dict_for_flows(flows, session.session_id)
+                session.session_id, self._auth_dict_for_flows(flows, session.session_id)
             )

         # check auth type currently being presented
@@ -410,7 +413,7 @@ class AuthHandler(BaseHandler):
         ret = self._auth_dict_for_flows(flows, session.session_id)
         ret["completed"] = list(creds)
         ret.update(errordict)
-        raise InteractiveAuthIncompleteError(ret)
+        raise InteractiveAuthIncompleteError(session.session_id, ret)

     async def add_oob_auth(
         self, stagetype: str, authdict: Dict[str, Any], clientip: str

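Callers of the renamed `check_ui_auth` (and of `validate_user_via_ui_auth`) now unpack a `(params, session_id)` tuple. A sketch of the adapted calling code, with the surrounding handler and request objects assumed:

```python
async def handle_password_change(auth_handler, requester, request, body):
    # The session ID can now be used to persist state across UI-auth retries.
    params, session_id = await auth_handler.validate_user_via_ui_auth(
        requester, request, body, "127.0.0.1", "modify account password"
    )
    return params, session_id
```
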
@@ -57,13 +57,10 @@ class EventStreamHandler(BaseHandler):
         timeout=0,
         as_client_event=True,
         affect_presence=True,
-        only_keys=None,
         room_id=None,
         is_guest=False,
     ):
         """Fetches the events stream for a given user.
-
-        If `only_keys` is not None, events from keys will be sent down.
         """

         if room_id:
@@ -93,7 +90,6 @@ class EventStreamHandler(BaseHandler):
             auth_user,
             pagin_config,
             timeout,
-            only_keys=only_keys,
             is_guest=is_guest,
             explicit_room_id=room_id,
         )

@@ -71,7 +71,7 @@ from synapse.replication.http.federation import (
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
@@ -2064,7 +2064,7 @@ class FederationHandler(BaseHandler):

         if not auth_events:
             prev_state_ids = await context.get_prev_state_ids()
-            auth_events_ids = await self.auth.compute_auth_events(
+            auth_events_ids = self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
             )
             auth_events_x = await self.store.get_events(auth_events_ids)
@@ -2470,7 +2470,7 @@ class FederationHandler(BaseHandler):
         }

         current_state_ids = await context.get_current_state_ids()
-        current_state_ids = dict(current_state_ids)
+        current_state_ids = dict(current_state_ids)  # type: ignore

         current_state_ids.update(state_updates)

@@ -23,39 +23,32 @@ logger = logging.getLogger(__name__)


 def _create_rerouter(func_name):
-    """Returns a function that looks at the group id and calls the function
+    """Returns an async function that looks at the group id and calls the function
     on federation or the local group server if the group is local
     """

-    def f(self, group_id, *args, **kwargs):
+    async def f(self, group_id, *args, **kwargs):
         if self.is_mine_id(group_id):
-            return getattr(self.groups_server_handler, func_name)(
+            return await getattr(self.groups_server_handler, func_name)(
                 group_id, *args, **kwargs
             )
         else:
             destination = get_domain_from_id(group_id)
-            d = getattr(self.transport_client, func_name)(
-                destination, group_id, *args, **kwargs
-            )

-            # Capture errors returned by the remote homeserver and
-            # re-throw specific errors as SynapseErrors. This is so
-            # when the remote end responds with things like 403 Not
-            # In Group, we can communicate that to the client instead
-            # of a 500.
-            def http_response_errback(failure):
-                failure.trap(HttpResponseException)
-                e = failure.value
+            try:
+                return await getattr(self.transport_client, func_name)(
+                    destination, group_id, *args, **kwargs
+                )
+            except HttpResponseException as e:
+                # Capture errors returned by the remote homeserver and
+                # re-throw specific errors as SynapseErrors. This is so
+                # when the remote end responds with things like 403 Not
+                # In Group, we can communicate that to the client instead
+                # of a 500.
                 raise e.to_synapse_error()

-            def request_failed_errback(failure):
-                failure.trap(RequestSendFailed)
+            except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")

-            d.addErrback(http_response_errback)
-            d.addErrback(request_failed_errback)
-            return d

     return f
@@ -468,6 +461,25 @@ class GroupsLocalHandler(GroupsLocalWorkerHandler):

         return {"state": "invite", "user_profile": user_profile}

+    async def change_user_admin_in_group(
+        self, group_id, user_id, want_admin, requester_user_id, content
+    ):
+        """Promotes or demotes a user in a group.
+        """
+
+        if not self.is_mine_id(user_id):
+            raise SynapseError(400, "User not on this server")
+
+        # TODO: We should probably support federation, but this is fine for now
+        if not self.is_mine_id(group_id):
+            raise SynapseError(400, "Group not on this server")
+
+        res = await self.groups_server_handler.change_user_admin_in_group(
+            group_id, user_id, want_admin, requester_user_id, content
+        )
+
+        return res
+
     async def remove_user_from_group(
         self, group_id, user_id, requester_user_id, content
     ):

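The rerouter rewrite is the errback-to-`except` translation in its purest form. Reduced to a skeleton, with `call_remote` standing in for the transport-client method looked up by name:

```python
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError


async def reroute(call_remote, *args):
    try:
        return await call_remote(*args)
    except HttpResponseException as e:
        # Surface remote HTTP errors as SynapseErrors instead of a 500.
        raise e.to_synapse_error()
    except RequestSendFailed:
        raise SynapseError(502, "Failed to contact group server")
```
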
@@ -22,14 +22,10 @@ import urllib.parse
 from typing import Awaitable, Callable, Dict, List, Optional, Tuple

 from canonicaljson import json
-from signedjson.key import decode_verify_key_bytes
-from signedjson.sign import verify_signed_json
-from unpaddedbase64 import decode_base64

 from twisted.internet.error import TimeoutError

 from synapse.api.errors import (
-    AuthError,
     CodeMessageException,
     Codes,
     HttpResponseException,
@@ -628,9 +624,9 @@ class IdentityHandler(BaseHandler):
             )

             if "mxid" in data:
-                if "signatures" not in data:
-                    raise AuthError(401, "No signatures on 3pid binding")
-                await self._verify_any_signature(data, id_server)
+                # note: we used to verify the identity server's signature here, but no longer
+                # require or validate it. See the following for context:
+                # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
                 return data["mxid"]
         except TimeoutError:
             raise SynapseError(500, "Timed out contacting identity server")
@@ -751,30 +747,6 @@ class IdentityHandler(BaseHandler):
         mxid = lookup_results["mappings"].get(lookup_value)
         return mxid

-    async def _verify_any_signature(self, data, server_hostname):
-        if server_hostname not in data["signatures"]:
-            raise AuthError(401, "No signature from server %s" % (server_hostname,))
-        for key_name, signature in data["signatures"][server_hostname].items():
-            try:
-                key_data = await self.blacklisting_http_client.get_json(
-                    "%s%s/_matrix/identity/api/v1/pubkey/%s"
-                    % (id_server_scheme, server_hostname, key_name)
-                )
-            except TimeoutError:
-                raise SynapseError(500, "Timed out contacting identity server")
-            if "public_key" not in key_data:
-                raise AuthError(
-                    401, "No public key named %s from %s" % (key_name, server_hostname)
-                )
-            verify_signed_json(
-                data,
-                server_hostname,
-                decode_verify_key_bytes(
-                    key_name, decode_base64(key_data["public_key"])
-                ),
-            )
-            return

     async def ask_id_server_for_third_party_invite(
         self,
         requester: Requester,

@@ -109,7 +109,7 @@ class InitialSyncHandler(BaseHandler):

         rooms_ret = []

-        now_token = await self.hs.get_event_sources().get_current_token()
+        now_token = self.hs.get_event_sources().get_current_token()

         presence_stream = self.hs.get_event_sources().sources["presence"]
         pagination_config = PaginationConfig(from_token=now_token)
@@ -360,7 +360,7 @@ class InitialSyncHandler(BaseHandler):
             current_state.values(), time_now
         )

-        now_token = await self.hs.get_event_sources().get_current_token()
+        now_token = self.hs.get_event_sources().get_current_token()

         limit = pagin_config.limit if pagin_config else None
         if limit is None:

@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple

 from canonicaljson import encode_canonical_json, json
@@ -45,7 +45,7 @@ from synapse.events.validator import EventValidator
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.state import StateFilter
 from synapse.types import (
     Collection,
@@ -93,11 +93,11 @@ class MessageHandler(object):

     async def get_room_data(
         self,
-        user_id: str = None,
-        room_id: str = None,
-        event_type: Optional[str] = None,
-        state_key: str = "",
-        is_guest: bool = False,
+        user_id: str,
+        room_id: str,
+        event_type: str,
+        state_key: str,
+        is_guest: bool,
     ) -> dict:
         """ Get data from a room.
@@ -407,7 +407,7 @@ class EventCreationHandler(object):
         #
         # map from room id to time-of-last-attempt.
         #
-        self._rooms_to_exclude_from_dummy_event_insertion = {}  # type: dict[str, int]
+        self._rooms_to_exclude_from_dummy_event_insertion = {}  # type: Dict[str, int]

         # we need to construct a ConsentURIBuilder here, as it checks that the necessary
         # config options, but *only* if we have a configuration for which we are
@@ -667,14 +667,14 @@ class EventCreationHandler(object):
         assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

         if event.is_state():
-            prev_state = await self.deduplicate_state_event(event, context)
-            if prev_state is not None:
+            prev_event = await self.deduplicate_state_event(event, context)
+            if prev_event is not None:
                 logger.info(
                     "Not bothering to persist state event %s duplicated by %s",
                     event.event_id,
-                    prev_state.event_id,
+                    prev_event.event_id,
                 )
-                return prev_state
+                return await self.store.get_stream_id_for_event(prev_event.event_id)

         return await self.handle_new_client_event(
             requester=requester, event=event, context=context, ratelimit=ratelimit
@@ -682,32 +682,37 @@ class EventCreationHandler(object):

     async def deduplicate_state_event(
         self, event: EventBase, context: EventContext
-    ) -> None:
+    ) -> Optional[EventBase]:
         """
         Checks whether event is in the latest resolved state in context.

-        If so, returns the version of the event in context.
-        Otherwise, returns None.
+        Args:
+            event: The event to check for duplication.
+            context: The event context.
+
+        Returns:
+            The previous version of the event is returned, if it is found in the
+            event context. Otherwise, None is returned.
         """
         prev_state_ids = await context.get_prev_state_ids()
         prev_event_id = prev_state_ids.get((event.type, event.state_key))
         if not prev_event_id:
-            return
+            return None
         prev_event = await self.store.get_event(prev_event_id, allow_none=True)
         if not prev_event:
-            return
+            return None

         if prev_event and event.user_id == prev_event.user_id:
             prev_content = encode_canonical_json(prev_event.content)
             next_content = encode_canonical_json(event.content)
             if prev_content == next_content:
                 return prev_event
-        return
+        return None

     async def create_and_send_nonmember_event(
         self,
         requester: Requester,
-        event_dict: EventBase,
+        event_dict: dict,
         ratelimit: bool = True,
         txn_id: Optional[str] = None,
     ) -> Tuple[EventBase, int]:
@@ -768,6 +773,15 @@ class EventCreationHandler(object):
         else:
             prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)

+        # we now ought to have some prev_events (unless it's a create event).
+        #
+        # do a quick sanity check here, rather than waiting until we've created the
+        # event and then try to auth it (which fails with a somewhat confusing "No
+        # create event in auth events")
+        assert (
+            builder.type == EventTypes.Create or len(prev_event_ids) > 0
+        ), "Attempting to create an event with no prev_events"
+
         event = await builder.build(prev_event_ids=prev_event_ids)
         context = await self.state.compute_event_context(event)
         if requester:
@@ -882,9 +896,7 @@ class EventCreationHandler(object):
         except Exception:
             # Ensure that we actually remove the entries in the push actions
             # staging area, if we calculated them.
-            run_in_background(
-                self.store.remove_push_actions_from_staging, event.event_id
-            )
+            await self.store.remove_push_actions_from_staging(event.event_id)
             raise

     async def _validate_canonical_alias(
@@ -962,7 +974,7 @@ class EventCreationHandler(object):
         # Validate a newly added alias or newly added alt_aliases.

         original_alias = None
-        original_alt_aliases = set()
+        original_alt_aliases = []  # type: List[str]

         original_event_id = event.unsigned.get("replaces_state")
         if original_event_id:
@@ -1010,6 +1022,10 @@ class EventCreationHandler(object):

         current_state_ids = await context.get_current_state_ids()

+        # We know this event is not an outlier, so this must be
+        # non-None.
+        assert current_state_ids is not None
+
         state_to_include_ids = [
             e_id
             for k, e_id in current_state_ids.items()
@@ -1061,7 +1077,7 @@ class EventCreationHandler(object):
             raise SynapseError(400, "Cannot redact event from a different room")

         prev_state_ids = await context.get_prev_state_ids()
-        auth_events_ids = await self.auth.compute_auth_events(
+        auth_events_ids = self.auth.compute_auth_events(
             event, prev_state_ids, for_verification=True
         )
         auth_events = await self.store.get_events(auth_events_ids)

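The `Optional[EventBase]` return type changes the calling convention: a duplicate state event now short-circuits to the stream position of the already-persisted event. The caller-side pattern, sketched with an assumed `handler` object:

```python
async def persist_state_event(handler, requester, event, context, ratelimit=True):
    prev_event = await handler.deduplicate_state_event(event, context)
    if prev_event is not None:
        # Duplicate: reuse the stream ID of the earlier event.
        return await handler.store.get_stream_id_for_event(prev_event.event_id)
    return await handler.handle_new_client_event(
        requester=requester, event=event, context=context, ratelimit=ratelimit
    )
```
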
@@ -14,7 +14,7 @@
 # limitations under the License.
 import json
 import logging
-from typing import Dict, Generic, List, Optional, Tuple, TypeVar
+from typing import TYPE_CHECKING, Dict, Generic, List, Optional, Tuple, TypeVar
 from urllib.parse import urlencode

 import attr
@@ -38,10 +38,11 @@ from synapse.config import ConfigError
 from synapse.http.server import respond_with_html
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
-from synapse.push.mailer import load_jinja2_templates
-from synapse.server import HomeServer
 from synapse.types import UserID, map_username_to_mxid_localpart

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)

 SESSION_COOKIE_NAME = b"oidc_session"
@@ -91,7 +92,7 @@ class OidcHandler:
     """Handles requests related to the OpenID Connect login flow.
     """

-    def __init__(self, hs: HomeServer):
+    def __init__(self, hs: "HomeServer"):
         self._callback_url = hs.config.oidc_callback_url  # type: str
         self._scopes = hs.config.oidc_scopes  # type: List[str]
         self._client_auth = ClientAuth(
@@ -121,9 +122,7 @@ class OidcHandler:
         self._hostname = hs.hostname  # type: str
         self._server_name = hs.config.server_name  # type: str
         self._macaroon_secret_key = hs.config.macaroon_secret_key
-        self._error_template = load_jinja2_templates(
-            hs.config.sso_template_dir, ["sso_error.html"]
-        )[0]
+        self._error_template = hs.config.sso_error_template

         # identifier for the external_ids table
         self._auth_provider_id = "oidc"

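Moving the `HomeServer` import under `TYPE_CHECKING` breaks an import cycle at runtime while keeping the annotation, which becomes a string forward reference. The pattern in general form:

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by type checkers; never executed at runtime.
    from synapse.server import HomeServer


class SomeHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
```
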
@@ -309,7 +309,7 @@ class PaginationHandler(object):
             room_token = pagin_config.from_token.room_key
         else:
             pagin_config.from_token = (
-                await self.hs.get_event_sources().get_current_token_for_pagination()
+                self.hs.get_event_sources().get_current_token_for_pagination()
             )
             room_token = pagin_config.from_token.room_key

@@ -33,13 +33,13 @@ from typing_extensions import ContextManager
 import synapse.metrics
 from synapse.api.constants import EventTypes, Membership, PresenceState
 from synapse.api.errors import SynapseError
+from synapse.api.presence import UserPresenceState
 from synapse.logging.context import run_in_background
 from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateHandler
-from synapse.storage.data_stores.main import DataStore
-from synapse.storage.presence import UserPresenceState
+from synapse.storage.databases.main import DataStore
 from synapse.types import JsonDict, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.descriptors import cached
@@ -319,7 +319,7 @@ class PresenceHandler(BasePresenceHandler):
         is some spurious presence changes that will self-correct.
         """
         # If the DB pool has already terminated, don't try updating
-        if not self.store.db.is_running():
+        if not self.store.db_pool.is_running():
             return

         logger.info(

@@ -142,6 +142,7 @@ class RegistrationHandler(BaseHandler):
         address=None,
         bind_emails=[],
         by_admin=False,
+        shadow_banned=False,
     ):
         """Registers a new client on the server.
@@ -159,6 +160,7 @@ class RegistrationHandler(BaseHandler):
             bind_emails (List[str]): list of emails to bind to this account.
             by_admin (bool): True if this registration is being made via the
                 admin api, otherwise False.
+            shadow_banned (bool): Shadow-ban the created user.
         Returns:
             str: user_id
         Raises:
@@ -194,6 +196,7 @@ class RegistrationHandler(BaseHandler):
                 admin=admin,
                 user_type=user_type,
                 address=address,
+                shadow_banned=shadow_banned,
             )

             if self.hs.config.user_directory_search_all_users:
@@ -224,6 +227,7 @@ class RegistrationHandler(BaseHandler):
                 make_guest=make_guest,
                 create_profile_with_displayname=default_display_name,
                 address=address,
+                shadow_banned=shadow_banned,
             )

             # Successfully registered
@@ -529,6 +533,7 @@ class RegistrationHandler(BaseHandler):
         admin=False,
         user_type=None,
         address=None,
+        shadow_banned=False,
     ):
         """Register user in the datastore.
@@ -546,9 +551,10 @@ class RegistrationHandler(BaseHandler):
             user_type (str|None): type of user. One of the values from
                 api.constants.UserTypes, or None for a normal user.
             address (str|None): the IP address used to perform the registration.
+            shadow_banned (bool): Whether to shadow-ban the user

         Returns:
-            Deferred
+            Awaitable
         """
         if self.hs.config.worker_app:
             return self._register_client(
@@ -561,6 +567,7 @@ class RegistrationHandler(BaseHandler):
                 admin=admin,
                 user_type=user_type,
                 address=address,
+                shadow_banned=shadow_banned,
             )
         else:
             return self.store.register_user(
@@ -572,6 +579,7 @@ class RegistrationHandler(BaseHandler):
                 create_profile_with_displayname=create_profile_with_displayname,
                 admin=admin,
                 user_type=user_type,
+                shadow_banned=shadow_banned,
             )

     async def register_device(

@@ -22,7 +22,7 @@ import logging
import math
import string
from collections import OrderedDict
from typing import Optional, Tuple
from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple

from synapse.api.constants import (
    EventTypes,
@@ -32,11 +32,14 @@ from synapse.api.constants import (
    RoomEncryptionAlgorithms,
)
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.storage.state import StateFilter
from synapse.types import (
    JsonDict,
    Requester,
    RoomAlias,
    RoomID,
@@ -53,6 +56,9 @@ from synapse.visibility import filter_events_for_client

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

id_server_scheme = "https://"
@@ -61,7 +67,7 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000


class RoomCreationHandler(BaseHandler):
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super(RoomCreationHandler, self).__init__(hs)

        self.spam_checker = hs.get_spam_checker()
@@ -92,7 +98,7 @@ class RoomCreationHandler(BaseHandler):
                "guest_can_join": False,
                "power_level_content_override": {},
            },
        }
        }  # type: Dict[str, Dict[str, Any]]

        # Modify presets to selectively enable encryption by default per homeserver config
        for preset_name, preset_config in self._presets_dict.items():
@@ -215,6 +221,9 @@ class RoomCreationHandler(BaseHandler):

        old_room_state = await tombstone_context.get_current_state_ids()

        # We know the tombstone event isn't an outlier so it has current state.
        assert old_room_state is not None

        # update any aliases
        await self._move_aliases_to_new_room(
            requester, old_room_id, new_room_id, old_room_state
@@ -528,17 +537,21 @@ class RoomCreationHandler(BaseHandler):
            logger.error("Unable to send updated alias events in new room: %s", e)

    async def create_room(
        self, requester, config, ratelimit=True, creator_join_profile=None
        self,
        requester: Requester,
        config: JsonDict,
        ratelimit: bool = True,
        creator_join_profile: Optional[JsonDict] = None,
    ) -> Tuple[dict, int]:
        """ Creates a new room.

        Args:
            requester (synapse.types.Requester):
            requester:
                The user who requested the room creation.
            config (dict) : A dict of configuration options.
            ratelimit (bool): set to False to disable the rate limiter
            config : A dict of configuration options.
            ratelimit: set to False to disable the rate limiter

            creator_join_profile (dict|None):
            creator_join_profile:
                Set to override the displayname and avatar for the creating
                user in this room. If unset, displayname and avatar will be
                derived from the user's profile. If set, should contain the
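The `create_room` hunk above shows the conversion pattern used throughout this changeset: the one-line untyped signature becomes a multi-line annotated one, and the per-argument type notes (`(dict)`, `(bool)`, `(dict|None)`) drop out of the docstring. A small illustrative sketch of the "after" shape (names simplified; `requester` is typed as `str` here only to keep the example self-contained, where Synapse passes a `Requester`):

```python
from typing import Any, Dict, Optional, Tuple

JsonDict = Dict[str, Any]


class Handler:
    # Before: `async def create_room(self, requester, config, ratelimit=True,
    # creator_join_profile=None)`, with "(dict)", "(bool)" etc. repeated in
    # the docstring.
    async def create_room(
        self,
        requester: str,
        config: JsonDict,
        ratelimit: bool = True,
        creator_join_profile: Optional[JsonDict] = None,
    ) -> Tuple[dict, int]:
        """Creates a new room.

        Args:
            requester: the user who requested the room creation.
            config: a dict of configuration options.
            ratelimit: set to False to disable the rate limiter.
        """
        return {}, 0
```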
@@ -601,6 +614,7 @@ class RoomCreationHandler(BaseHandler):
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        room_alias = None
        if "room_alias_name" in config:
            for wchar in string.whitespace:
                if wchar in config["room_alias_name"]:
@@ -611,8 +625,6 @@ class RoomCreationHandler(BaseHandler):

            if mapping:
                raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)
        else:
            room_alias = None

        invite_list = config.get("invite", [])
        for i in invite_list:
@@ -771,23 +783,30 @@ class RoomCreationHandler(BaseHandler):

    async def _send_events_for_new_room(
        self,
        creator,  # A Requester object.
        room_id,
        preset_config,
        invite_list,
        initial_state,
        creation_content,
        room_alias=None,
        power_level_content_override=None,  # Doesn't apply when initial state has power level state event content
        creator_join_profile=None,
        creator: Requester,
        room_id: str,
        preset_config: str,
        invite_list: List[str],
        initial_state: StateMap,
        creation_content: JsonDict,
        room_alias: Optional[RoomAlias] = None,
        power_level_content_override: Optional[JsonDict] = None,
        creator_join_profile: Optional[JsonDict] = None,
    ) -> int:
        """Sends the initial events into a new room.

        `power_level_content_override` doesn't apply when initial state has
        power level state event content.

        Returns:
            The stream_id of the last event persisted.
        """

        def create(etype, content, **kwargs):
        creator_id = creator.user.to_string()

        event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}

        def create(etype: str, content: JsonDict, **kwargs) -> JsonDict:
            e = {"type": etype, "content": content}

            e.update(event_keys)
@@ -795,7 +814,7 @@ class RoomCreationHandler(BaseHandler):

            return e

        async def send(etype, content, **kwargs) -> int:
        async def send(etype: str, content: JsonDict, **kwargs) -> int:
            event = create(etype, content, **kwargs)
            logger.debug("Sending %s in new room", etype)
            (
@@ -808,10 +827,6 @@ class RoomCreationHandler(BaseHandler):

        config = self._presets_dict[preset_config]

        creator_id = creator.user.to_string()

        event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}

        creation_content.update({"creator": creator_id})
        await send(etype=EventTypes.Create, content=creation_content)
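These `_send_events_for_new_room` hunks also hoist `creator_id` and `event_keys` out of the inner `create` helper, so both the `create` and `send` closures share a single copy. A compact, hypothetical sketch of that closure pattern:

```python
# Sketch of the closure pattern used above (simplified, hypothetical names).
from typing import Any, Dict

JsonDict = Dict[str, Any]


def make_create(room_id: str, creator_id: str):
    # Shared fields are computed once, at the enclosing scope...
    event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}

    def create(etype: str, content: JsonDict, **kwargs) -> JsonDict:
        e = {"type": etype, "content": content}
        e.update(event_keys)  # ...and folded into every event the closure builds,
        e.update(kwargs)      # with per-event overrides applied last.
        return e

    return create


create = make_create("!room:example.com", "@alice:example.com")
ev = create("m.room.create", {"creator": "@alice:example.com"})
assert ev["room_id"] == "!room:example.com" and ev["state_key"] == ""
```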
@@ -852,7 +867,7 @@ class RoomCreationHandler(BaseHandler):
            "kick": 50,
            "redact": 50,
            "invite": 50,
        }
        }  # type: JsonDict

        if config["original_invitees_have_ops"]:
            for invitee in invite_list:
@@ -906,7 +921,7 @@ class RoomCreationHandler(BaseHandler):
        return last_sent_stream_id

    async def _generate_room_id(
        self, creator_id: str, is_public: str, room_version: RoomVersion,
        self, creator_id: str, is_public: bool, room_version: RoomVersion,
    ):
        # autogen room IDs and try to create it. We may clash, so just
        # try a few times till one goes through, giving up eventually.
@@ -930,23 +945,30 @@ class RoomCreationHandler(BaseHandler):


class RoomContextHandler(object):
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state

    async def get_event_context(self, user, room_id, event_id, limit, event_filter):
    async def get_event_context(
        self,
        user: UserID,
        room_id: str,
        event_id: str,
        limit: int,
        event_filter: Optional[Filter],
    ) -> Optional[JsonDict]:
        """Retrieves events, pagination tokens and state around a given event
        in a room.

        Args:
            user (UserID)
            room_id (str)
            event_id (str)
            limit (int): The maximum number of events to return in total
            user
            room_id
            event_id
            limit: The maximum number of events to return in total
                (excluding state).
            event_filter (Filter|None): the filter to apply to the events returned
            event_filter: the filter to apply to the events returned
                (excluding the target event_id)

        Returns:
@@ -1033,15 +1055,21 @@ class RoomContextHandler(object):


class RoomEventSource(object):
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()

    async def get_new_events(
        self, user, from_key, limit, room_ids, is_guest, explicit_room_id=None
    ):
        self,
        user: UserID,
        from_key: str,
        limit: int,
        room_ids: List[str],
        is_guest: bool,
        explicit_room_id: Optional[str] = None,
    ) -> Tuple[List[EventBase], str]:
        # We just ignore the key for now.

        to_key = await self.get_current_key()
        to_key = self.get_current_key()

        from_token = RoomStreamToken.parse(from_key)
        if from_token.topological:
@@ -1081,10 +1109,10 @@ class RoomEventSource(object):

        return (events, end_key)

    def get_current_key(self):
        return self.store.get_room_events_max_id()
    def get_current_key(self) -> str:
        return "s%d" % (self.store.get_room_max_stream_ordering(),)

    def get_current_key_for_room(self, room_id):
    def get_current_key_for_room(self, room_id: str) -> Awaitable[str]:
        return self.store.get_room_events_max_id(room_id)
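`RoomEventSource.get_current_key` above becomes synchronous: instead of awaiting a storage query, it formats the in-memory maximum stream ordering as an `s<n>` stream token (the `SearchHandler` hunk further down drops an `await` at a call site to match). A rough sketch of the idea, with a hypothetical in-memory store standing in for Synapse's datastore:

```python
# Hedged sketch: a synchronous "current position" token formatted as "s<n>",
# assuming a store that already tracks the max stream ordering in memory.
class FakeStore:
    def __init__(self) -> None:
        self._max_stream_ordering = 0

    def on_event_persisted(self) -> None:
        self._max_stream_ordering += 1

    def get_room_max_stream_ordering(self) -> int:
        return self._max_stream_ordering


class EventSource:
    def __init__(self, store: FakeStore) -> None:
        self.store = store

    def get_current_key(self) -> str:
        # No database round-trip needed: the counter is already in memory.
        return "s%d" % (self.store.get_room_max_stream_ordering(),)


store = FakeStore()
source = EventSource(store)
store.on_event_persisted()
assert source.get_current_key() == "s1"
```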
@@ -1096,7 +1124,7 @@ class RoomShutdownHandler(object):
    )
    DEFAULT_ROOM_NAME = "Content Violation Notification"

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.room_member_handler = hs.get_room_member_handler()
        self._room_creation_handler = hs.get_room_creation_handler()
@@ -16,13 +16,14 @@
import abc
import logging
from http import HTTPStatus
from typing import Dict, Iterable, List, Optional, Tuple, Union
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union

from unpaddedbase64 import encode_base64

from synapse import types
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import EventFormatVersions
from synapse.crypto.event_signing import compute_event_reference_hash
from synapse.events import EventBase
@@ -36,6 +37,10 @@ from synapse.util.distributor import user_joined_room, user_left_room

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


@@ -47,7 +52,7 @@ class RoomMemberHandler(object):

    __metaclass__ = abc.ABCMeta

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
@@ -77,6 +82,17 @@ class RoomMemberHandler(object):
        if self._is_on_event_persistence_instance:
            self.persist_event_storage = hs.get_storage().persistence

        self._join_rate_limiter_local = Ratelimiter(
            clock=self.clock,
            rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
            burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
        )
        self._join_rate_limiter_remote = Ratelimiter(
            clock=self.clock,
            rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
            burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
        )

        # This is only used to get at ratelimit function, and
        # maybe_kick_guest_users. It's fine there are multiple of these as
        # it doesn't store state.
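The two `Ratelimiter` instances above are configured from the `rc_joins_local` and `rc_joins_remote` settings, each with a sustained rate (`rate_hz`) and a `burst_count`. A minimal token-bucket sketch with a compatible `can_do_action(key) -> (allowed, time_allowed)` shape; this is a simplification for illustration, not Synapse's actual implementation:

```python
# Hypothetical token-bucket limiter: allows burst_count actions immediately,
# then refills at rate_hz tokens per second.
import time
from typing import Dict, Tuple


class TokenBucketLimiter:
    def __init__(self, rate_hz: float, burst_count: int) -> None:
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        # key -> (tokens remaining, timestamp of last update)
        self._buckets = {}  # type: Dict[str, Tuple[float, float]]

    def can_do_action(self, key: str) -> Tuple[bool, float]:
        now = time.time()
        tokens, last_ts = self._buckets.get(key, (float(self.burst_count), now))
        # Refill based on elapsed time, capped at the burst size.
        tokens = min(self.burst_count, tokens + (now - last_ts) * self.rate_hz)
        if tokens >= 1.0:
            self._buckets[key] = (tokens - 1.0, now)
            return True, now
        self._buckets[key] = (tokens, now)
        # Time at which the next whole token becomes available.
        return False, now + (1.0 - tokens) / self.rate_hz


limiter = TokenBucketLimiter(rate_hz=0.1, burst_count=3)
for _ in range(3):
    allowed, _ = limiter.can_do_action("@alice:example.com")
    assert allowed
allowed, time_allowed = limiter.can_do_action("@alice:example.com")
assert not allowed  # burst exhausted; retry after (time_allowed - now)
```

When `can_do_action` returns `False`, the join path below converts `time_allowed - time_now_s` into the `retry_after_ms` carried by `LimitExceededError`, which surfaces to clients as an HTTP 429.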
@@ -195,7 +211,7 @@ class RoomMemberHandler(object):
                return duplicate.event_id, stream_id

        stream_id = await self.event_creation_handler.handle_new_client_event(
            requester, event, context, extra_users=[target], ratelimit=ratelimit
            requester, event, context, extra_users=[target], ratelimit=ratelimit,
        )

        prev_state_ids = await context.get_prev_state_ids()
@@ -441,7 +457,28 @@ class RoomMemberHandler(object):
                # so don't really fit into the general auth process.
                raise AuthError(403, "Guest access not allowed")

            if not is_host_in_room:
            if is_host_in_room:
                time_now_s = self.clock.time()
                allowed, time_allowed = self._join_rate_limiter_local.can_do_action(
                    requester.user.to_string(),
                )

                if not allowed:
                    raise LimitExceededError(
                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
                    )

            else:
                time_now_s = self.clock.time()
                allowed, time_allowed = self._join_rate_limiter_remote.can_do_action(
                    requester.user.to_string(),
                )

                if not allowed:
                    raise LimitExceededError(
                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
                    )

                inviter = await self._get_inviter(target.to_string(), room_id)
                if inviter and not self.hs.is_mine(inviter):
                    remote_room_hosts.append(inviter.domain)
@@ -469,26 +506,39 @@ class RoomMemberHandler(object):
                user_id=target.to_string(), room_id=room_id
            )  # type: Optional[RoomsForUser]
            if not invite:
                logger.info(
                    "%s sent a leave request to %s, but that is not an active room "
                    "on this server, and there is no pending invite",
                    target,
                    room_id,
                )

                raise SynapseError(404, "Not a known room")

            logger.info(
                "%s rejects invite to %s from %s", target, room_id, invite.sender
            )

            if self.hs.is_mine_id(invite.sender):
                # the inviter was on our server, but has now left. Carry on
                # with the normal rejection codepath.
                #
                # This is a bit of a hack, because the room might still be
                # active on other servers.
                pass
            else:
            if not self.hs.is_mine_id(invite.sender):
                # send the rejection to the inviter's HS (with fallback to
                # local event)
                return await self.remote_reject_invite(
                    invite.event_id, txn_id, requester, content,
                )

            # the inviter was on our server, but has now left. Carry on
            # with the normal rejection codepath, which will also send the
            # rejection out to any other servers we believe are still in the room.

            # thanks to overzealous cleaning up of event_forward_extremities in
            # `delete_old_current_state_events`, it's possible to end up with no
            # forward extremities here. If that happens, let's just hang the
            # rejection off the invite event.
            #
            # see: https://github.com/matrix-org/synapse/issues/7139
            if len(latest_event_ids) == 0:
                latest_event_ids = [invite.event_id]

            return await self._local_membership_update(
                requester=requester,
                target=target,
@@ -952,7 +1002,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")

        if self.hs.config.limit_remote_rooms.enabled:
        check_complexity = self.hs.config.limit_remote_rooms.enabled
        if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
            check_complexity = not await self.auth.is_server_admin(user)

        if check_complexity:
            # Fetch the room complexity
            too_complex = await self._is_remote_room_too_complex(
                room_id, remote_room_hosts
@@ -975,7 +1029,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):

        # Check the room we just joined wasn't too large, if we didn't fetch the
        # complexity of it before.
        if self.hs.config.limit_remote_rooms.enabled:
        if check_complexity:
            if too_complex is False:
                # We checked, and we're under the limit.
                return event_id, stream_id
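The complexity hunks above replace two bare config checks with a single `check_complexity` flag, computed once so that server admins can be exempted when `admins_can_join` is set and so the post-join re-check agrees with the pre-join one. A sketch of that boolean logic, with hypothetical stand-ins for the config object and the admin lookup:

```python
# Sketch of the "admins may bypass the complexity limit" decision above.
# `LimitRemoteRoomsConfig` is a hypothetical stand-in for Synapse's config.
from dataclasses import dataclass


@dataclass
class LimitRemoteRoomsConfig:
    enabled: bool = False
    admins_can_join: bool = False


def should_check_complexity(config: LimitRemoteRoomsConfig, is_server_admin: bool) -> bool:
    # Start from the config switch, then exempt admins only when the
    # admins_can_join escape hatch is enabled.
    check = config.enabled
    if check and config.admins_can_join:
        check = not is_server_admin
    return check


cfg = LimitRemoteRoomsConfig(enabled=True, admins_can_join=True)
assert should_check_complexity(cfg, is_server_admin=True) is False
assert should_check_complexity(cfg, is_server_admin=False) is True
```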
@@ -14,15 +14,16 @@
# limitations under the License.
import logging
import re
from typing import Callable, Dict, Optional, Set, Tuple
from typing import TYPE_CHECKING, Callable, Dict, Optional, Set, Tuple

import attr
import saml2
import saml2.response
from saml2.client import Saml2Client

from synapse.api.errors import SynapseError
from synapse.api.errors import AuthError, SynapseError
from synapse.config import ConfigError
from synapse.config.saml2_config import SamlAttributeRequirement
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.module_api import ModuleApi
@@ -34,6 +35,9 @@ from synapse.types import (
from synapse.util.async_helpers import Linearizer
from synapse.util.iterutils import chunk_seq

if TYPE_CHECKING:
    import synapse.server

logger = logging.getLogger(__name__)


@@ -49,7 +53,7 @@ class Saml2SessionData:


class SamlHandler:
    def __init__(self, hs):
    def __init__(self, hs: "synapse.server.HomeServer"):
        self._saml_client = Saml2Client(hs.config.saml2_sp_config)
        self._auth = hs.get_auth()
        self._auth_handler = hs.get_auth_handler()
@@ -62,6 +66,7 @@ class SamlHandler:
        self._grandfathered_mxid_source_attribute = (
            hs.config.saml2_grandfathered_mxid_source_attribute
        )
        self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements

        # plugin to do custom mapping from saml response to mxid
        self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
@@ -73,7 +78,7 @@ class SamlHandler:
        self._auth_provider_id = "saml"

        # a map from saml session id to Saml2SessionData object
        self._outstanding_requests_dict = {}
        self._outstanding_requests_dict = {}  # type: Dict[str, Saml2SessionData]

        # a lock on the mappings
        self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock)
@@ -96,6 +101,9 @@ class SamlHandler:
            relay_state=client_redirect_url
        )

        # Since SAML sessions timeout it is useful to log when they were created.
        logger.info("Initiating a new SAML session: %s" % (reqid,))

        now = self._clock.time_msec()
        self._outstanding_requests_dict[reqid] = Saml2SessionData(
            creation_time=now, ui_auth_session_id=ui_auth_session_id,
@@ -162,11 +170,18 @@ class SamlHandler:
                saml2.BINDING_HTTP_POST,
                outstanding=self._outstanding_requests_dict,
            )
        except saml2.response.UnsolicitedResponse as e:
            # the pysaml2 library helpfully logs an ERROR here, but neglects to log
            # the session ID. I don't really want to put the full text of the exception
            # in the (user-visible) exception message, so let's log the exception here
            # so we can track down the session IDs later.
            logger.warning(str(e))
            raise SynapseError(400, "Unexpected SAML2 login.")
        except Exception as e:
            raise SynapseError(400, "Unable to parse SAML2 response: %s" % (e,))
            raise SynapseError(400, "Unable to parse SAML2 response: %s." % (e,))

        if saml2_auth.not_signed:
            raise SynapseError(400, "SAML2 response was not signed")
            raise SynapseError(400, "SAML2 response was not signed.")

        logger.debug("SAML2 response: %s", saml2_auth.origxml)
        for assertion in saml2_auth.assertions:
@@ -185,6 +200,9 @@ class SamlHandler:
            saml2_auth.in_response_to, None
        )

        for requirement in self._saml2_attribute_requirements:
            _check_attribute_requirement(saml2_auth.ava, requirement)

        remote_user_id = self._user_mapping_provider.get_remote_user_id(
            saml2_auth, client_redirect_url
        )
@@ -291,6 +309,21 @@ class SamlHandler:
        del self._outstanding_requests_dict[reqid]


def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement):
    values = ava.get(req.attribute, [])
    for v in values:
        if v == req.value:
            return

    logger.info(
        "SAML2 attribute %s did not match required value '%s' (was '%s')",
        req.attribute,
        req.value,
        values,
    )
    raise AuthError(403, "You are not authorized to log in here.")


DOT_REPLACE_PATTERN = re.compile(
    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
)
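The new `_check_attribute_requirement` helper above allows a login only if at least one value of the required SAML attribute matches the configured value, since SAML attribute values arrive as lists. A self-contained sketch of the same check, with a hypothetical `Req` standing in for `SamlAttributeRequirement`:

```python
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class Req:  # hypothetical stand-in for SamlAttributeRequirement
    attribute: str
    value: str


def attribute_requirement_met(ava: Dict[str, List[str]], req: Req) -> bool:
    # SAML attribute values arrive as lists; any one matching value is enough.
    return req.value in ava.get(req.attribute, [])


assert attribute_requirement_met({"userGroup": ["staff", "admin"]}, Req("userGroup", "staff"))
assert not attribute_requirement_met({"userGroup": ["guest"]}, Req("userGroup", "staff"))
```

In the handler itself, a failed requirement raises `AuthError(403)` rather than returning `False`, so the login is rejected before any user mapping happens.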
@@ -340,7 +340,7 @@ class SearchHandler(BaseHandler):
        # If client has asked for "context" for each event (i.e. some surrounding
        # events and state), fetch that
        if event_context is not None:
            now_token = await self.hs.get_event_sources().get_current_token()
            now_token = self.hs.get_event_sources().get_current_token()

            contexts = {}
            for event in allowed_events:
@@ -232,7 +232,7 @@ class StatsHandler:

            if membership == prev_membership:
                pass  # noop
            if membership == Membership.JOIN:
            elif membership == Membership.JOIN:
                room_stats_delta["joined_members"] += 1
            elif membership == Membership.INVITE:
                room_stats_delta["invited_members"] += 1
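The final `StatsHandler` hunk is a one-word bug fix with real consequences: with a plain `if`, an event whose membership equals the previous membership would take the no-op branch and then still be tested against `Membership.JOIN`, incrementing `joined_members` again. A runnable sketch of the corrected chain (string constants stand in for Synapse's `Membership` enum):

```python
def apply_membership_delta(membership: str, prev_membership: str, delta: dict) -> None:
    if membership == prev_membership:
        pass  # no change: leave the counters alone
    elif membership == "join":  # Membership.JOIN in Synapse
        delta["joined_members"] += 1
    elif membership == "invite":  # Membership.INVITE
        delta["invited_members"] += 1


delta = {"joined_members": 0, "invited_members": 0}
apply_membership_delta("join", "join", delta)
assert delta["joined_members"] == 0  # an unchanged join is not re-counted
```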