Compare commits
anoa/bla ... erikj/fix_
93 commits

Commit SHA1s:
44f727446a, 9146a8a691, 6d3905c7c7, 1f4269700c, 7b71695388, 70259d8c8c, 20a67aa70d, 654cc9470e,
de5cafe980, 9e0f22874f, 84c0e46cce, 74f29284aa, a312e890f5, 626b8f0846, 96e7d3c4a0, 34c20493b9,
21bb50ca3f, 8f27b7fde1, 903d11c43a, c356b4bf42, 85c56445fb, 1fcdbeb3ab, 97647b33c2, 79c1f973ce,
0afd83584b, d6094176d1, 1b70662be9, c8e9dc4cf4, 6d7b22041d, 995cc615a0, 402213bf41, 0ec0bc3886,
3ee17585cd, da0090fdff, 5649669c3c, 6b5a115c0a, c276bd9969, 654e239b25, 74976a8e43, 9b8a53c7b9,
a7d4985a6b, f30f12a839, f49708dee3, 9991aaa49c, 3a337f6d27, 20fa83f374, 8075504a60, 0a08cd1065,
1f39155071, 4433d01519, 27cfd712b3, 470dedd266, 4182bb812f, 9f87da0a84, 7eff59ec91, 19b15d63e8,
618d405a32, 1cf4a68108, 9e66f3761c, 1264c8ac89, 921a3f8a59, 3ee97a2748, ec606ea9e3, d9dc6185d3,
a34b17e492, 091e9482af, 898196f1cc, 617e8a4653, d9d86c2996, 123711ed19, d59378d86b, 629a951b49,
b2486f6656, a9a8f29729, 58e583eac1, b76f53bb79, a06b7a5d94, bc203c962f, cd0f65d2c7, 4aa027ea70,
8de3703d21, f76194a021, 6905f5751a, d35a451399, 9789b1fba5, ca2db5dd0c, 1781bbe319, 66ac4b1e34,
5009ffcaa4, fe0f4a3591, c9c0ad5e20, a93f3121f8, a97cec18bb
@@ -1,22 +1,36 @@
version: 2
version: 2.1
jobs:
dockerhubuploadrelease:
machine: true
docker:
- image: docker:git
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
- setup_remote_docker
- docker_prepare
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64,linux/arm/v7,linux/arm64

dockerhubuploadlatest:
machine: true
docker:
- image: docker:git
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
- setup_remote_docker
- docker_prepare
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:latest
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64,linux/arm/v7,linux/arm64

workflows:
version: 2
build:
jobs:
- dockerhubuploadrelease:
@@ -29,3 +43,33 @@ workflows:
filters:
branches:
only: master

commands:
docker_prepare:
description: Downloads the buildx cli plugin and enables multiarch images
parameters:
buildx_version:
type: string
default: "v0.4.1"
steps:
- run: apk add --no-cache curl
- run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
- run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
- run: chmod a+x ~/.docker/cli-plugins/docker-buildx
# install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job
- run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
# create a context named `builder` for the builds
- run: docker context create builder
# create a buildx builder using the new context, and set it as the default
- run: docker buildx create builder --use

docker_build:
description: Builds and pushed images to dockerhub using buildx
parameters:
platforms:
type: string
default: linux/amd64
tag:
type: string
steps:
- run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
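For reference, the new `docker_prepare` and `docker_build` commands above boil down to the following buildx workflow. This is a minimal local sketch assuming a Docker engine with the buildx CLI plugin already available (outside CI, recent Docker releases ship the plugin, so the `curl` step from the config is not repeated here); the tag is the same example tag the `dockerhubuploadlatest` job uses.

```bash
# register qemu binfmt handlers so arm images can be built on an amd64 host
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes

# create a dedicated context and a buildx builder that uses it
docker context create builder
docker buildx create builder --use

# build and push a multi-arch image, mirroring the docker_build command
docker buildx build -f docker/Dockerfile --push \
    --platform linux/amd64,linux/arm/v7,linux/arm64 \
    -t matrixdotorg/synapse:latest .
```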
.gitignore (vendored, 1 change)
@@ -21,6 +21,7 @@ _trial_temp*/
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/
CHANGES.md (51 changes)
@@ -1,3 +1,46 @@
Synapse 1.21.2 (2020-10-15)
===========================

Debian packages and Docker images have been rebuilt using the latest versions of dependency libraries, including authlib 0.15.1. Please see bugfixes below.

Security advisory
-----------------

* HTML pages served via Synapse were vulnerable to cross-site scripting (XSS)
attacks. All server administrators are encouraged to upgrade.
([\#8444](https://github.com/matrix-org/synapse/pull/8444))
([CVE-2020-26891](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26891))

This fix was originally included in v1.21.0 but was missing a security advisory.

This was reported by [Denis Kasak](https://github.com/dkasak).

Bugfixes
--------

- Fix rare bug where sending an event would fail due to a racey assertion. ([\#8530](https://github.com/matrix-org/synapse/issues/8530))
- An updated version of the authlib dependency is included in the Docker and Debian images to fix an issue using OpenID Connect. See [\#8534](https://github.com/matrix-org/synapse/issues/8534) for details.


Synapse 1.21.1 (2020-10-13)
===========================

This release fixes a regression in v1.21.0 that prevented debian packages from being built.
It is otherwise identical to v1.21.0.

Synapse 1.21.0 (2020-10-12)
===========================

No significant changes since v1.21.0rc3.

As [noted in
v1.20.0](https://github.com/matrix-org/synapse/blob/release-v1.21.0/CHANGES.md#synapse-1200-2020-09-22),
a future release will drop support for accessing Synapse's
[Admin API](https://github.com/matrix-org/synapse/tree/master/docs/admin_api) under the
`/_matrix/client/*` endpoint prefixes. At that point, the Admin API will only
be accessible under `/_synapse/admin`.


Synapse 1.21.0rc3 (2020-10-08)
==============================

@@ -154,9 +197,11 @@ API](https://github.com/matrix-org/synapse/tree/master/docs) has been
accessible under the `/_matrix/client/api/v1/admin`,
`/_matrix/client/unstable/admin`, `/_matrix/client/r0/admin` and
`/_synapse/admin` prefixes. In a future release, we will be dropping support
for accessing Synapse's Admin API using the `/_matrix/client/*` prefixes. This
makes it easier for homeserver admins to lock down external access to the Admin
API endpoints.
for accessing Synapse's Admin API using the `/_matrix/client/*` prefixes.

From that point, the Admin API will only be accessible under `/_synapse/admin`.
This makes it easier for homeserver admins to lock down external access to the
Admin API endpoints.

Synapse 1.20.0rc5 (2020-09-18)
==============================
@@ -63,6 +63,10 @@ run-time:
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```

You can also provide the `-d` option, which will lint the files that have been
changed since the last git commit. This will often be significantly faster than
linting the whole codebase.

Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.
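A quick usage sketch of the flag described above; paths are optional, and combining `-d` with explicit paths lints both sets:

```bash
# lint only the files changed since the last git commit
./scripts-dev/lint.sh -d
```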
New changelog.d files (one line each):

changelog.d/7921.docker: Added multi-arch support (arm64,arm/v7) for the docker images. Contributed by @maquis196.
changelog.d/8390.docker: Add support for passing commandline args to the synapse process. Contributed by @samuel-p.
changelog.d/8437.feature: Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices.
changelog.d/8472.misc: Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit.
changelog.d/8476.bugfix: Fix message duplication if something goes wrong after persisting the event.
changelog.d/8479.feature: Add the ability to send non-membership events into a room via the `ModuleApi`.
changelog.d/8488.misc: Allow events to be sent to clients sooner when using sharded event persisters.
changelog.d/8489.feature: Allow running background tasks in a separate worker process.
changelog.d/8494.misc: Remove the deprecated `Handlers` object.
changelog.d/8496.misc: Allow events to be sent to clients sooner when using sharded event persisters.
changelog.d/8497.misc: Fix a threadsafety bug in unit tests.
changelog.d/8499.misc: Allow events to be sent to clients sooner when using sharded event persisters.
changelog.d/8501.feature: Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)).
changelog.d/8502.feature: Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz.
changelog.d/8503.misc: Add user agent to user_daily_visits table.
changelog.d/8504.bugfix: Expose the `uk.half-shot.msc2778.login.application_service` to clients from the login API. This feature was added in v1.21.0, but was not exposed as a potential login flow.
changelog.d/8505.misc: Add type hints to various parts of the code base.
changelog.d/8507.misc: Add type hints to various parts of the code base.
changelog.d/8513.feature: Allow running background tasks in a separate worker process.
changelog.d/8514.misc: Remove unused code from the test framework.
changelog.d/8515.misc: Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable.
changelog.d/8517.bugfix: Fix error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`.
changelog.d/8526.doc: Added note about docker in manhole.md regarding which ip address to bind to. Contributed by @Maquis196.
changelog.d/8527.bugfix: Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table.
changelog.d/8529.doc: Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration.
changelog.d/8535.feature: Support modifying event content in `ThirdPartyRules` modules.
changelog.d/8536.bugfix: Fix not sending events over federation when using sharded event writers.
changelog.d/8537.misc: Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`.
changelog.d/8542.misc: Improve database performance by executing more queries without starting transactions.
changelog.d/8544.feature: Allow running background tasks in a separate worker process.
changelog.d/8545.bugfix: Fix a long standing bug where email notifications for encrypted messages were blank.
changelog.d/8547.misc: Enable mypy type checking for `synapse.util.caches`.
changelog.d/8548.misc: Rename `Cache` to `DeferredCache`, to better reflect its purpose.
changelog.d/8561.misc: Move metric registration code down into `LruCache`.
changelog.d/8562.misc: Add type annotations for `LruCache`.
changelog.d/8563.misc: Replace `DeferredCache` with the lighter-weight `LruCache` where possible.
changelog.d/8564.feature: Support modifying event content in `ThirdPartyRules` modules.
changelog.d/8566.misc: Add virtualenv-generated folders to `.gitignore`.
changelog.d/8567.bugfix: Fix increase in the number of `There was no active span...` errors logged when using OpenTracing.
changelog.d/8568.misc: Add `get_immediate` method to `DeferredCache`.
changelog.d/8569.misc: Fix mypy not properly checking across the codebase, additionally, fix a typing assertion error in `handlers/auth.py`.
changelog.d/8571.misc: Fix `synmark` benchmark runner.
changelog.d/8572.misc: Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s.
changelog.d/8577.misc: Adjust a protocol-type definition to fit `sqlite3` assertions.
changelog.d/8578.misc: Support macOS on the `synmark` benchmark runner.
changelog.d/8583.misc: Update `mypy` static type checker to 0.790.
changelog.d/8585.bugfix: Fix a bug that prevented errors encountered during execution of the `synapse_port_db` from being correctly printed.
changelog.d/8587.misc: Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting.
changelog.d/8589.removal: Drop unused `device_max_stream_id` table.
changelog.d/8590.misc: Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices.
changelog.d/8591.misc: Move metric registration code down into `LruCache`.
changelog.d/8592.misc: Remove extraneous unittest logging decorators from unit tests.
changelog.d/8599.feature: Allow running background tasks in a separate worker process.
changelog.d/8600.misc: Update `mypy` static type checker to 0.790.
changelog.d/8606.feature: Limit appservice transactions to 100 persistent and 100 ephemeral events.
changelog.d/8609.misc: Add type hints to profile and base handler.
debian/build_virtualenv (vendored, 2 changes)
@@ -42,7 +42,7 @@ dh_virtualenv \
--preinstall="mock" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd"
--extras="all,systemd,test"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
debian/changelog (vendored, 23 changes)
@@ -1,3 +1,26 @@
matrix-synapse-py3 (1.21.2) stable; urgency=medium

[ Synapse Packaging team ]
* New synapse release 1.21.2.

-- Synapse Packaging team <packages@matrix.org> Thu, 15 Oct 2020 09:23:27 -0400

matrix-synapse-py3 (1.21.1) stable; urgency=medium

[ Synapse Packaging team ]
* New synapse release 1.21.1.

[ Andrew Morgan ]
* Explicitly install "test" python dependencies.

-- Synapse Packaging team <packages@matrix.org> Tue, 13 Oct 2020 10:24:13 +0100

matrix-synapse-py3 (1.21.0) stable; urgency=medium

* New synapse release 1.21.0.

-- Synapse Packaging team <packages@matrix.org> Mon, 12 Oct 2020 15:47:44 +0100

matrix-synapse-py3 (1.20.1) stable; urgency=medium

* New synapse release 1.20.1.
@@ -83,7 +83,7 @@ docker logs synapse
If all is well, you should now be able to connect to http://localhost:8008 and
see a confirmation message.

The following environment variables are supported in run mode:
The following environment variables are supported in `run` mode:

* `SYNAPSE_CONFIG_DIR`: where additional config files are stored. Defaults to
`/data`.
@@ -94,6 +94,20 @@ The following environment variables are supported in run mode:
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.

For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:

```
docker run -d --name synapse \
--mount type=volume,src=synapse-data,dst=/data \
-p 8008:8008 \
matrixdotorg/synapse:latest run \
-m synapse.app.generic_worker \
--config-path=/data/homeserver.yaml \
--config-path=/data/generic_worker.yaml
```

If you do not provide `-m`, the value of the `SYNAPSE_WORKER` environment variable is used. If you do not provide at least one `--config-path` or `-c`, the value of the `SYNAPSE_CONFIG_PATH` environment variable is used instead.

## Generating an (admin) user

After synapse is running, you may wish to create a user via `register_new_matrix_user`.
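As a complement to the example above, here is a hedged sketch of the environment-variable fallback the README describes: when `-m` is omitted, the entrypoint uses `SYNAPSE_WORKER` instead. The container name and config file paths are illustrative only.

```bash
docker run -d --name synapse-worker \
    --mount type=volume,src=synapse-data,dst=/data \
    -e SYNAPSE_WORKER=synapse.app.generic_worker \
    matrixdotorg/synapse:latest run \
    --config-path=/data/homeserver.yaml \
    --config-path=/data/generic_worker.yaml
```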
@@ -90,7 +90,7 @@ federation_rc_concurrent: 3

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
@@ -179,7 +179,7 @@ def run_generate_config(environ, ownership):


def main(args, environ):
mode = args[1] if len(args) > 1 else None
mode = args[1] if len(args) > 1 else "run"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
@@ -205,36 +205,47 @@ def main(args, environ):
config_dir, config_path, environ, ownership
)

if mode is not None:
if mode != "run":
error("Unknown execution mode '%s'" % (mode,))

config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
args = args[2:]

if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
if "-m" not in args:
args = ["-m", synapse_worker] + args

# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)

if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
Config file '%s' does not exist.

The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
)

error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)

error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
args += ["--config-path", config_path]

log("Starting synapse with config file " + config_path)
log("Starting synapse with args " + " ".join(args))

args = ["python", "-m", synapse_worker, "--config-path", config_path]
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
@@ -5,8 +5,45 @@ The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.

**_Security Warning_**

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

***

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:
`homeserver.yaml`. The configuration is slightly different if you're using docker.

#### Docker config

If you are using Docker, set `bind_addresses` to `['0.0.0.0']` as shown:

```yaml
listeners:
- port: 9000
bind_addresses: ['0.0.0.0']
type: manhole
```

When using `docker run` to start the server, you will then need to change the command to the following to include the
`manhole` port forwarding. The `-p 127.0.0.1:9000:9000` below is important: it
ensures that access to the `manhole` is only possible for local users.

```bash
docker run -d --name synapse \
--mount type=volume,src=synapse-data,dst=/data \
-p 8008:8008 \
-p 127.0.0.1:9000:9000 \
matrixdotorg/synapse:latest
```

#### Native config

If you are not using docker, set `bind_addresses` to `['::1', '127.0.0.1']` as shown.
The `bind_addresses` in the example below is important: it ensures that access to the
`manhole` is only possible for local users).

```yaml
listeners:
@@ -15,12 +52,7 @@ listeners:
type: manhole
```

(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
#### Accessing synapse manhole

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:
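The connection command implied by that last paragraph, as a sketch: the port and username come from the listener config above, and authentication details are covered elsewhere in manhole.md.

```bash
ssh -p 9000 matrix@localhost
```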
@@ -136,24 +136,34 @@ the server's database.

### Lifetime limits

**Note: this feature is mainly useful within a closed federation or on
servers that don't federate, because there currently is no way to
enforce these limits in an open federation.**

Server admins can restrict the values their local users are allowed to
use for both `min_lifetime` and `max_lifetime`. These limits can be
defined as such in the `retention` section of the configuration file:
Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined as such in the
`retention` section of the configuration file:

```yaml
allowed_lifetime_min: 1d
allowed_lifetime_max: 1y
```

Here, `allowed_lifetime_min` is the lowest value a local user can set
for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max`
is the highest value. Both parameters are optional (e.g. setting
`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a
minimum and no maximum).
The limits are considered when running purge jobs. If necessary, the
effective value of `max_lifetime` will be brought between
`allowed_lifetime_min` and `allowed_lifetime_max` (inclusive).
This means that, if the value of `max_lifetime` defined in the room's state
is lower than `allowed_lifetime_min`, the value of `allowed_lifetime_min`
will be used instead. Likewise, if the value of `max_lifetime` is higher
than `allowed_lifetime_max`, the value of `allowed_lifetime_max` will be
used instead.

In the example above, we ensure Synapse never deletes events that are less
than one day old, and that it always deletes events that are over a year
old.

If a default policy is set, and its `max_lifetime` value is lower than
`allowed_lifetime_min` or higher than `allowed_lifetime_max`, the same
process applies.

Both parameters are optional; if one is omitted Synapse won't use it to
adjust the effective value of `max_lifetime`.

Like other settings in this section, these parameters can be expressed
either as a duration or as a number of milliseconds.
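A concrete reading of the clamping rule described above, using the `1d`/`1y` limits from the example: a room whose retention policy sets `max_lifetime: 6h` would be purged as if `max_lifetime` were `1d`, a room that sets `max_lifetime: 5y` would be purged as if it were `1y`, and a value already inside the range is used unchanged.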
@@ -54,7 +54,7 @@ server {
proxy_set_header X-Forwarded-For $remote_addr;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 10M;
client_max_body_size 50M;
}
}
```
@@ -893,7 +893,7 @@ media_store_path: "DATADIR/media_store"

# The largest allowed upload size in bytes
#
#max_upload_size: 10M
#max_upload_size: 50M

# Maximum number of pixels that will be thumbnailed
#
@@ -15,7 +15,7 @@ example flow would be (where '>' indicates master to worker and

> SERVER example.com
< REPLICATE
> POSITION events master 53
> POSITION events master 53 53
> RDATA events master 54 ["$foo1:bar.com", ...]
> RDATA events master 55 ["$foo4:bar.com", ...]

@@ -138,9 +138,9 @@ the wire:
< NAME synapse.app.appservice
< PING 1490197665618
< REPLICATE
> POSITION events master 1
> POSITION backfill master 1
> POSITION caches master 1
> POSITION events master 1 1
> POSITION backfill master 1 1
> POSITION caches master 1 1
> RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
> RDATA events master 14 ["$149019767112vOHxz:localhost:8823",
"!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
@@ -185,6 +185,11 @@ client (C):
updates via HTTP API, rather than via the DB, then processes should make the
request to the appropriate process.

Two positions are included, the "new" position and the last position sent respectively.
This allows servers to tell instances that the positions have advanced but no
data has been written, without clients needlessly checking to see if they
have missed any updates.

#### ERROR (S, C)

There was an error
mypy.ini (11 changes)
@@ -15,9 +15,14 @@ files =
synapse/events/builder.py,
synapse/events/spamcheck.py,
synapse/federation,
synapse/handlers/_base.py,
synapse/handlers/account_data.py,
synapse/handlers/appservice.py,
synapse/handlers/auth.py,
synapse/handlers/cas_handler.py,
synapse/handlers/deactivate_account.py,
synapse/handlers/device.py,
synapse/handlers/devicemessage.py,
synapse/handlers/directory.py,
synapse/handlers/events.py,
synapse/handlers/federation.py,
@@ -26,7 +31,10 @@ files =
synapse/handlers/message.py,
synapse/handlers/oidc_handler.py,
synapse/handlers/pagination.py,
synapse/handlers/password_policy.py,
synapse/handlers/presence.py,
synapse/handlers/profile.py,
synapse/handlers/read_marker.py,
synapse/handlers/room.py,
synapse/handlers/room_member.py,
synapse/handlers/room_member_worker.py,
@@ -59,8 +67,7 @@ files =
synapse/streams,
synapse/types.py,
synapse/util/async_helpers.py,
synapse/util/caches/descriptors.py,
synapse/util/caches/stream_change_cache.py,
synapse/util/caches,
synapse/util/metrics.py,
tests/replication,
tests/test_utils,
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements
@@ -7,15 +7,90 @@

set -e

if [ $# -ge 1 ]
then
files=$*
else
files="synapse tests scripts-dev scripts contrib synctl"
usage() {
echo
echo "Usage: $0 [-h] [-d] [paths...]"
echo
echo "-d"
echo " Lint files that have changed since the last git commit."
echo
echo " If paths are provided and this option is set, both provided paths and those"
echo " that have changed since the last commit will be linted."
echo
echo " If no paths are provided and this option is not set, all files will be linted."
echo
echo " Note that paths with a file extension that is not '.py' will be excluded."
echo "-h"
echo " Display this help text."
}

USING_DIFF=0
files=()

while getopts ":dh" opt; do
case $opt in
d)
USING_DIFF=1
;;
h)
usage
exit
;;
\?)
echo "ERROR: Invalid option: -$OPTARG" >&2
usage
exit
;;
esac
done

# Strip any options from the command line arguments now that
# we've finished processing them
shift "$((OPTIND-1))"

if [ $USING_DIFF -eq 1 ]; then
# Check both staged and non-staged changes
for path in $(git diff HEAD --name-only); do
filename=$(basename "$path")
file_extension="${filename##*.}"

# If an extension is present, and it's something other than 'py',
# then ignore this file
if [[ -n ${file_extension+x} && $file_extension != "py" ]]; then
continue
fi

# Append this path to our list of files to lint
files+=("$path")
done
fi

echo "Linting these locations: $files"
isort $files
python3 -m black $files
# Append any remaining arguments as files to lint
files+=("$@")

if [[ $USING_DIFF -eq 1 ]]; then
# If we were asked to lint changed files, and no paths were found as a result...
if [ ${#files[@]} -eq 0 ]; then
# Then print and exit
echo "No files found to lint."
exit 0
fi
else
# If we were not asked to lint changed files, and no paths were found as a result,
# then lint everything!
if [[ -z ${files+x} ]]; then
# Lint all source code files and directories
files=("synapse" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py")
fi
fi

echo "Linting these paths: ${files[*]}"
echo

# Print out the commands being run
set -x

isort "${files[@]}"
python3 -m black "${files[@]}"
./scripts-dev/config-lint.sh
flake8 $files
flake8 "${files[@]}"
@@ -22,6 +22,7 @@ import logging
import sys
import time
import traceback
from typing import Optional

import yaml

@@ -152,7 +153,7 @@ IGNORED_TABLES = {

# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error = None
end_error = None # type: Optional[str]
# The exec_info for the error, if any. If error is defined but not exec_info the script
# will show only the error message without the stacktrace, if exec_info is defined but
# not the error then the script will show nothing outside of what's printed in the run
@@ -635,7 +636,7 @@ class Porter(object):
self.progress.done()
except Exception as e:
global end_error_exec_info
end_error = e
end_error = str(e)
end_error_exec_info = sys.exc_info()
logger.exception("")
finally:
@@ -817,7 +818,7 @@ class Porter(object):
"ALTER SEQUENCE events_stream_seq RESTART WITH %s", (next_id,)
)

txn.execute("SELECT -MIN(stream_ordering) FROM events")
txn.execute("SELECT GREATEST(-MIN(stream_ordering), 1) FROM events")
curr_id = txn.fetchone()[0]
if curr_id:
next_id = curr_id + 1
setup.py (6 changes)
@@ -15,12 +15,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
from setuptools import setup, find_packages, Command
import sys

from setuptools import Command, find_packages, setup

here = os.path.abspath(os.path.dirname(__file__))

@@ -104,6 +102,8 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
"flake8",
]

CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
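A quick way to exercise the new `mypy` pin together with the expanded file list in mypy.ini above; this sketch assumes `CONDITIONAL_REQUIREMENTS` feeds the setuptools `extras_require` (so a `mypy` extra exists) and relies on `mypy` reading the `files =` setting from mypy.ini when invoked with no arguments:

```bash
pip install -e ".[mypy]"   # assumed extra providing mypy==0.790 and mypy-zope
mypy                       # checks the modules listed in mypy.ini
```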
@@ -1,13 +1,12 @@
from .sorteddict import (
SortedDict,
SortedKeysView,
SortedItemsView,
SortedValuesView,
)
from .sorteddict import SortedDict, SortedItemsView, SortedKeysView, SortedValuesView
from .sortedlist import SortedKeyList, SortedList, SortedListWithKey

__all__ = [
"SortedDict",
"SortedKeysView",
"SortedItemsView",
"SortedValuesView",
"SortedKeyList",
"SortedList",
"SortedListWithKey",
]
stubs/sortedcontainers/sortedlist.pyi (new file, 177 lines)
@@ -0,0 +1,177 @@
|
||||
# stub for SortedList. This is an exact copy of
|
||||
# https://github.com/grantjenks/python-sortedcontainers/blob/a419ffbd2b1c935b09f11f0971696e537fd0c510/sortedcontainers/sortedlist.pyi
|
||||
# (from https://github.com/grantjenks/python-sortedcontainers/pull/107)
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Generic,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
MutableSequence,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
TypeVar,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_SL = TypeVar("_SL", bound=SortedList)
|
||||
_SKL = TypeVar("_SKL", bound=SortedKeyList)
|
||||
_Key = Callable[[_T], Any]
|
||||
_Repr = Callable[[], str]
|
||||
|
||||
def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ...
|
||||
|
||||
class SortedList(MutableSequence[_T]):
|
||||
|
||||
DEFAULT_LOAD_FACTOR: int = ...
|
||||
def __init__(
|
||||
self, iterable: Optional[Iterable[_T]] = ..., key: Optional[_Key[_T]] = ...,
|
||||
): ...
|
||||
# NB: currently mypy does not honour return type, see mypy #3307
|
||||
@overload
|
||||
def __new__(cls: Type[_SL], iterable: None, key: None) -> _SL: ...
|
||||
@overload
|
||||
def __new__(cls: Type[_SL], iterable: None, key: _Key[_T]) -> SortedKeyList[_T]: ...
|
||||
@overload
|
||||
def __new__(cls: Type[_SL], iterable: Iterable[_T], key: None) -> _SL: ...
|
||||
@overload
|
||||
def __new__(cls, iterable: Iterable[_T], key: _Key[_T]) -> SortedKeyList[_T]: ...
|
||||
@property
|
||||
def key(self) -> Optional[Callable[[_T], Any]]: ...
|
||||
def _reset(self, load: int) -> None: ...
|
||||
def clear(self) -> None: ...
|
||||
def _clear(self) -> None: ...
|
||||
def add(self, value: _T) -> None: ...
|
||||
def _expand(self, pos: int) -> None: ...
|
||||
def update(self, iterable: Iterable[_T]) -> None: ...
|
||||
def _update(self, iterable: Iterable[_T]) -> None: ...
|
||||
def discard(self, value: _T) -> None: ...
|
||||
def remove(self, value: _T) -> None: ...
|
||||
def _delete(self, pos: int, idx: int) -> None: ...
|
||||
def _loc(self, pos: int, idx: int) -> int: ...
|
||||
def _pos(self, idx: int) -> int: ...
|
||||
def _build_index(self) -> None: ...
|
||||
def __contains__(self, value: Any) -> bool: ...
|
||||
def __delitem__(self, index: Union[int, slice]) -> None: ...
|
||||
@overload
|
||||
def __getitem__(self, index: int) -> _T: ...
|
||||
@overload
|
||||
def __getitem__(self, index: slice) -> List[_T]: ...
|
||||
@overload
|
||||
def _getitem(self, index: int) -> _T: ...
|
||||
@overload
|
||||
def _getitem(self, index: slice) -> List[_T]: ...
|
||||
@overload
|
||||
def __setitem__(self, index: int, value: _T) -> None: ...
|
||||
@overload
|
||||
def __setitem__(self, index: slice, value: Iterable[_T]) -> None: ...
|
||||
def __iter__(self) -> Iterator[_T]: ...
|
||||
def __reversed__(self) -> Iterator[_T]: ...
|
||||
def __len__(self) -> int: ...
|
||||
def reverse(self) -> None: ...
|
||||
def islice(
|
||||
self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool,
|
||||
) -> Iterator[_T]: ...
|
||||
def _islice(
|
||||
self, min_pos: int, min_idx: int, max_pos: int, max_idx: int, reverse: bool,
|
||||
) -> Iterator[_T]: ...
|
||||
def irange(
|
||||
self,
|
||||
minimum: Optional[int] = ...,
|
||||
maximum: Optional[int] = ...,
|
||||
inclusive: Tuple[bool, bool] = ...,
|
||||
reverse: bool = ...,
|
||||
) -> Iterator[_T]: ...
|
||||
def bisect_left(self, value: _T) -> int: ...
|
||||
def bisect_right(self, value: _T) -> int: ...
|
||||
def bisect(self, value: _T) -> int: ...
|
||||
def _bisect_right(self, value: _T) -> int: ...
|
||||
def count(self, value: _T) -> int: ...
|
||||
def copy(self: _SL) -> _SL: ...
|
||||
def __copy__(self: _SL) -> _SL: ...
|
||||
def append(self, value: _T) -> None: ...
|
||||
def extend(self, values: Iterable[_T]) -> None: ...
|
||||
def insert(self, index: int, value: _T) -> None: ...
|
||||
def pop(self, index: int = ...) -> _T: ...
|
||||
def index(
|
||||
self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ...
|
||||
) -> int: ...
|
||||
def __add__(self: _SL, other: Iterable[_T]) -> _SL: ...
|
||||
def __radd__(self: _SL, other: Iterable[_T]) -> _SL: ...
|
||||
def __iadd__(self: _SL, other: Iterable[_T]) -> _SL: ...
|
||||
def __mul__(self: _SL, num: int) -> _SL: ...
|
||||
def __rmul__(self: _SL, num: int) -> _SL: ...
|
||||
def __imul__(self: _SL, num: int) -> _SL: ...
|
||||
def __eq__(self, other: Any) -> bool: ...
|
||||
def __ne__(self, other: Any) -> bool: ...
|
||||
def __lt__(self, other: Sequence[_T]) -> bool: ...
|
||||
def __gt__(self, other: Sequence[_T]) -> bool: ...
|
||||
def __le__(self, other: Sequence[_T]) -> bool: ...
|
||||
def __ge__(self, other: Sequence[_T]) -> bool: ...
|
||||
def __repr__(self) -> str: ...
|
||||
def _check(self) -> None: ...
|
||||
|
||||
class SortedKeyList(SortedList[_T]):
|
||||
def __init__(
|
||||
self, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ...
|
||||
) -> None: ...
|
||||
def __new__(
|
||||
cls, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ...
|
||||
) -> SortedKeyList[_T]: ...
|
||||
@property
|
||||
def key(self) -> Callable[[_T], Any]: ...
|
||||
def clear(self) -> None: ...
|
||||
def _clear(self) -> None: ...
|
||||
def add(self, value: _T) -> None: ...
|
||||
def _expand(self, pos: int) -> None: ...
|
||||
def update(self, iterable: Iterable[_T]) -> None: ...
|
||||
def _update(self, iterable: Iterable[_T]) -> None: ...
|
||||
# NB: Must be T to be safely passed to self.func, yet base class imposes Any
|
||||
def __contains__(self, value: _T) -> bool: ... # type: ignore
|
||||
def discard(self, value: _T) -> None: ...
|
||||
def remove(self, value: _T) -> None: ...
|
||||
def _delete(self, pos: int, idx: int) -> None: ...
|
||||
def irange(
|
||||
self,
|
||||
minimum: Optional[int] = ...,
|
||||
maximum: Optional[int] = ...,
|
||||
inclusive: Tuple[bool, bool] = ...,
|
||||
reverse: bool = ...,
|
||||
): ...
|
||||
def irange_key(
|
||||
self,
|
||||
min_key: Optional[Any] = ...,
|
||||
max_key: Optional[Any] = ...,
|
||||
inclusive: Tuple[bool, bool] = ...,
|
||||
reserve: bool = ...,
|
||||
): ...
|
||||
def bisect_left(self, value: _T) -> int: ...
|
||||
def bisect_right(self, value: _T) -> int: ...
|
||||
def bisect(self, value: _T) -> int: ...
|
||||
def bisect_key_left(self, key: Any) -> int: ...
|
||||
def _bisect_key_left(self, key: Any) -> int: ...
|
||||
def bisect_key_right(self, key: Any) -> int: ...
|
||||
def _bisect_key_right(self, key: Any) -> int: ...
|
||||
def bisect_key(self, key: Any) -> int: ...
|
||||
def count(self, value: _T) -> int: ...
|
||||
def copy(self: _SKL) -> _SKL: ...
|
||||
def __copy__(self: _SKL) -> _SKL: ...
|
||||
def index(
|
||||
self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ...
|
||||
) -> int: ...
|
||||
def __add__(self: _SKL, other: Iterable[_T]) -> _SKL: ...
|
||||
def __radd__(self: _SKL, other: Iterable[_T]) -> _SKL: ...
|
||||
def __iadd__(self: _SKL, other: Iterable[_T]) -> _SKL: ...
|
||||
def __mul__(self: _SKL, num: int) -> _SKL: ...
|
||||
def __rmul__(self: _SKL, num: int) -> _SKL: ...
|
||||
def __imul__(self: _SKL, num: int) -> _SKL: ...
|
||||
def __repr__(self) -> str: ...
|
||||
def _check(self) -> None: ...
|
||||
|
||||
SortedListWithKey = SortedKeyList
|
||||
@@ -48,7 +48,7 @@ try:
except ImportError:
pass

__version__ = "1.21.0rc3"
__version__ = "1.21.2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
@@ -34,7 +34,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.logging import opentracing as opentracing
from synapse.types import StateMap, UserID
from synapse.util.caches import register_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure

@@ -70,8 +69,9 @@ class Auth:
self.store = hs.get_datastore()
self.state = hs.get_state_handler()

self.token_cache = LruCache(10000)
register_cache("cache", "token_cache", self.token_cache)
self.token_cache = LruCache(
10000, "token_cache"
) # type: LruCache[str, Tuple[str, bool]]

self._auth_blocking = AuthBlocking(self.hs)
@@ -89,7 +89,7 @@ async def export_data_command(hs, args):
user_id = args.user_id
directory = args.output_directory

res = await hs.get_handlers().admin_handler.export_user_data(
res = await hs.get_admin_handler().export_user_data(
user_id, FileExfiltrationWriter(user_id, directory=directory)
)
print(res)
@@ -790,10 +790,6 @@ class FederationSenderHandler:
send_queue.process_rows_for_federation(self.federation_sender, rows)
await self.update_token(token)

# We also need to poke the federation sender when new events happen
elif stream_name == "events":
self.federation_sender.notify_new_events(token)

# ... and when new receipts happen
elif stream_name == ReceiptsStream.NAME:
await self._on_new_receipts(rows)
@@ -18,10 +18,7 @@ import sys

from prometheus_client import Gauge

from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process

logger = logging.getLogger("synapse.app.homeserver")

@@ -152,13 +149,8 @@ def start_phone_stats_home(hs):
clock.looping_call(hs.get_datastore().generate_user_daily_visits, 5 * 60 * 1000)

# monthly active user limiting functionality
def reap_monthly_active_users():
return run_as_background_process(
"reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users
)

clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
reap_monthly_active_users()
clock.looping_call(hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60)
hs.get_datastore().reap_monthly_active_users()

@wrap_as_background_process("generate_monthly_active_users")
async def generate_monthly_active_users():
@@ -14,14 +14,15 @@
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import re
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Iterable, List, Match, Optional
|
||||
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.appservice.api import ApplicationServiceApi
|
||||
from synapse.types import GroupID, get_domain_from_id
|
||||
from synapse.events import EventBase
|
||||
from synapse.types import GroupID, JsonDict, UserID, get_domain_from_id
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.appservice.api import ApplicationServiceApi
|
||||
from synapse.storage.databases.main import DataStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -32,38 +33,6 @@ class ApplicationServiceState:
|
||||
UP = "up"
|
||||
|
||||
|
||||
class AppServiceTransaction:
|
||||
"""Represents an application service transaction."""
|
||||
|
||||
def __init__(self, service, id, events):
|
||||
self.service = service
|
||||
self.id = id
|
||||
self.events = events
|
||||
|
||||
async def send(self, as_api: ApplicationServiceApi) -> bool:
|
||||
"""Sends this transaction using the provided AS API interface.
|
||||
|
||||
Args:
|
||||
as_api: The API to use to send.
|
||||
Returns:
|
||||
True if the transaction was sent.
|
||||
"""
|
||||
return await as_api.push_bulk(
|
||||
service=self.service, events=self.events, txn_id=self.id
|
||||
)
|
||||
|
||||
async def complete(self, store: "DataStore") -> None:
|
||||
"""Completes this transaction as successful.
|
||||
|
||||
Marks this transaction ID on the application service and removes the
|
||||
transaction contents from the database.
|
||||
|
||||
Args:
|
||||
store: The database store to operate on.
|
||||
"""
|
||||
await store.complete_appservice_txn(service=self.service, txn_id=self.id)
|
||||
|
||||
|
||||
class ApplicationService:
|
||||
"""Defines an application service. This definition is mostly what is
|
||||
provided to the /register AS API.
|
||||
@@ -91,6 +60,7 @@ class ApplicationService:
|
||||
protocols=None,
|
||||
rate_limited=True,
|
||||
ip_range_whitelist=None,
|
||||
supports_ephemeral=False,
|
||||
):
|
||||
self.token = token
|
||||
self.url = (
|
||||
@@ -102,6 +72,7 @@ class ApplicationService:
|
||||
self.namespaces = self._check_namespaces(namespaces)
|
||||
self.id = id
|
||||
self.ip_range_whitelist = ip_range_whitelist
|
||||
self.supports_ephemeral = supports_ephemeral
|
||||
|
||||
if "|" in self.id:
|
||||
raise Exception("application service ID cannot contain '|' character")
|
||||
@@ -161,19 +132,21 @@ class ApplicationService:
|
||||
raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
|
||||
return namespaces
|
||||
|
||||
def _matches_regex(self, test_string, namespace_key):
|
||||
def _matches_regex(self, test_string: str, namespace_key: str) -> Optional[Match]:
|
||||
for regex_obj in self.namespaces[namespace_key]:
|
||||
if regex_obj["regex"].match(test_string):
|
||||
return regex_obj
|
||||
return None
|
||||
|
||||
def _is_exclusive(self, ns_key, test_string):
|
||||
def _is_exclusive(self, ns_key: str, test_string: str) -> bool:
|
||||
regex_obj = self._matches_regex(test_string, ns_key)
|
||||
if regex_obj:
|
||||
return regex_obj["exclusive"]
|
||||
return False
|
||||
|
||||
async def _matches_user(self, event, store):
|
||||
async def _matches_user(
|
||||
self, event: Optional[EventBase], store: Optional["DataStore"] = None
|
||||
) -> bool:
|
||||
if not event:
|
||||
return False
|
||||
|
||||
@@ -188,14 +161,23 @@ class ApplicationService:
|
||||
if not store:
|
||||
return False
|
||||
|
||||
does_match = await self._matches_user_in_member_list(event.room_id, store)
|
||||
does_match = await self.matches_user_in_member_list(event.room_id, store)
|
||||
return does_match
|
||||
|
||||
@cached(num_args=1, cache_context=True)
|
||||
async def _matches_user_in_member_list(self, room_id, store, cache_context):
|
||||
member_list = await store.get_users_in_room(
|
||||
room_id, on_invalidate=cache_context.invalidate
|
||||
)
|
||||
@cached(num_args=1)
|
||||
async def matches_user_in_member_list(
|
||||
self, room_id: str, store: "DataStore"
|
||||
) -> bool:
|
||||
"""Check if this service is interested a room based upon it's membership
|
||||
|
||||
Args:
|
||||
room_id: The room to check.
|
||||
store: The datastore to query.
|
||||
|
||||
Returns:
|
||||
True if this service would like to know about this room.
|
||||
"""
|
||||
member_list = await store.get_users_in_room(room_id)
|
||||
|
||||
# check joined member events
|
||||
for user_id in member_list:
|
||||
@@ -203,12 +185,14 @@ class ApplicationService:
|
||||
return True
|
||||
return False
|
||||
|
||||
def _matches_room_id(self, event):
|
||||
def _matches_room_id(self, event: EventBase) -> bool:
|
||||
if hasattr(event, "room_id"):
|
||||
return self.is_interested_in_room(event.room_id)
|
||||
return False
|
||||
|
||||
async def _matches_aliases(self, event, store):
|
||||
async def _matches_aliases(
|
||||
self, event: EventBase, store: Optional["DataStore"] = None
|
||||
) -> bool:
|
||||
if not store or not event:
|
||||
return False
|
||||
|
||||
@@ -218,12 +202,15 @@ class ApplicationService:
|
||||
return True
|
||||
return False
|
||||
|
||||
async def is_interested(self, event, store=None) -> bool:
|
||||
async def is_interested(
|
||||
self, event: EventBase, store: Optional["DataStore"] = None
|
||||
) -> bool:
|
||||
"""Check if this service is interested in this event.
|
||||
|
||||
Args:
|
||||
event(Event): The event to check.
|
||||
store(DataStore)
|
||||
event: The event to check.
|
||||
store: The datastore to query.
|
||||
|
||||
Returns:
|
||||
True if this service would like to know about this event.
|
||||
"""
|
||||
@@ -231,39 +218,66 @@ class ApplicationService:
|
||||
if self._matches_room_id(event):
|
||||
return True
|
||||
|
||||
if await self._matches_aliases(event, store):
|
||||
# This will check the namespaces first before
|
||||
# checking the store, so should be run before _matches_aliases
|
||||
if await self._matches_user(event, store):
|
||||
return True
|
||||
|
||||
if await self._matches_user(event, store):
|
||||
# This will check the store, so should be run last
|
||||
if await self._matches_aliases(event, store):
|
||||
return True
|
||||
|
||||
return False

    def is_interested_in_user(self, user_id):
    @cached(num_args=1)
    async def is_interested_in_presence(
        self, user_id: UserID, store: "DataStore"
    ) -> bool:
        """Check if this service is interested a user's presence

        Args:
            user_id: The user to check.
            store: The datastore to query.

        Returns:
            True if this service would like to know about presence for this user.
        """
        # Find all the rooms the sender is in
        if self.is_interested_in_user(user_id.to_string()):
            return True
        room_ids = await store.get_rooms_for_user(user_id.to_string())

        # Then find out if the appservice is interested in any of those rooms
        for room_id in room_ids:
            if await self.matches_user_in_member_list(room_id, store):
                return True
        return False

    def is_interested_in_user(self, user_id: str) -> bool:
        return (
            self._matches_regex(user_id, ApplicationService.NS_USERS)
            bool(self._matches_regex(user_id, ApplicationService.NS_USERS))
            or user_id == self.sender
        )

    def is_interested_in_alias(self, alias):
    def is_interested_in_alias(self, alias: str) -> bool:
        return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))

    def is_interested_in_room(self, room_id):
    def is_interested_in_room(self, room_id: str) -> bool:
        return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))

    def is_exclusive_user(self, user_id):
    def is_exclusive_user(self, user_id: str) -> bool:
        return (
            self._is_exclusive(ApplicationService.NS_USERS, user_id)
            or user_id == self.sender
        )

    def is_interested_in_protocol(self, protocol):
    def is_interested_in_protocol(self, protocol: str) -> bool:
        return protocol in self.protocols

    def is_exclusive_alias(self, alias):
    def is_exclusive_alias(self, alias: str) -> bool:
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

    def is_exclusive_room(self, room_id):
    def is_exclusive_room(self, room_id: str) -> bool:
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def get_exclusive_user_regexes(self):
@@ -276,14 +290,14 @@ class ApplicationService:
            if regex_obj["exclusive"]
        ]

    def get_groups_for_user(self, user_id):
    def get_groups_for_user(self, user_id: str) -> Iterable[str]:
        """Get the groups that this user is associated with by this AS

        Args:
            user_id (str): The ID of the user.
            user_id: The ID of the user.

        Returns:
            iterable[str]: an iterable that yields group_id strings.
            An iterable that yields group_id strings.
        """
        return (
            regex_obj["group_id"]
@@ -291,7 +305,7 @@ class ApplicationService:
            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
        )

    def is_rate_limited(self):
    def is_rate_limited(self) -> bool:
        return self.rate_limited

    def __str__(self):
@@ -300,3 +314,45 @@ class ApplicationService:
        dict_copy["token"] = "<redacted>"
        dict_copy["hs_token"] = "<redacted>"
        return "ApplicationService: %s" % (dict_copy,)


class AppServiceTransaction:
    """Represents an application service transaction."""

    def __init__(
        self,
        service: ApplicationService,
        id: int,
        events: List[EventBase],
        ephemeral: List[JsonDict],
    ):
        self.service = service
        self.id = id
        self.events = events
        self.ephemeral = ephemeral

    async def send(self, as_api: "ApplicationServiceApi") -> bool:
        """Sends this transaction using the provided AS API interface.

        Args:
            as_api: The API to use to send.
        Returns:
            True if the transaction was sent.
        """
        return await as_api.push_bulk(
            service=self.service,
            events=self.events,
            ephemeral=self.ephemeral,
            txn_id=self.id,
        )

    async def complete(self, store: "DataStore") -> None:
        """Completes this transaction as successful.

        Marks this transaction ID on the application service and removes the
        transaction contents from the database.

        Args:
            store: The database store to operate on.
        """
        await store.complete_appservice_txn(service=self.service, txn_id=self.id)
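Taken together with the `_TransactionController` changes later in this diff, a transaction's lifecycle is: create it in the store (now including ephemeral events), push it to the appservice, then mark it complete. A hedged sketch of that flow, with the store, AS API and service objects as placeholders rather than real Synapse instances:

```python
from typing import List

async def send_one_transaction(store, as_api, service, events: List, ephemeral: List) -> bool:
    # 1. Persist the batch as a transaction (ephemeral events included, per this diff).
    txn = await store.create_appservice_txn(
        service=service, events=events, ephemeral=ephemeral
    )
    # 2. Push it to the appservice over the AS API (AppServiceTransaction.send above).
    sent = await txn.send(as_api)
    # 3. On success, mark it complete so its contents are removed from the database.
    if sent:
        await txn.complete(store)
    return sent
```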

@@ -14,12 +14,13 @@
# limitations under the License.
import logging
import urllib
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING, List, Optional, Tuple

from prometheus_client import Counter

from synapse.api.constants import EventTypes, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.events import EventBase
from synapse.events.utils import serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.types import JsonDict, ThirdPartyInstanceID
@@ -93,7 +94,7 @@ class ApplicationServiceApi(SimpleHttpClient):

        self.protocol_meta_cache = ResponseCache(
            hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )
        ) # type: ResponseCache[Tuple[str, str]]

    async def query_user(self, service, user_id):
        if service.url is None:
@@ -201,7 +202,13 @@ class ApplicationServiceApi(SimpleHttpClient):
        key = (service.id, protocol)
        return await self.protocol_meta_cache.wrap(key, _get)

    async def push_bulk(self, service, events, txn_id=None):
    async def push_bulk(
        self,
        service: "ApplicationService",
        events: List[EventBase],
        ephemeral: List[JsonDict],
        txn_id: Optional[int] = None,
    ):
        if service.url is None:
            return True

@@ -211,15 +218,19 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning(
                "push_bulk: Missing txn ID sending events to %s", service.url
            )
            txn_id = str(0)
        txn_id = str(txn_id)
            txn_id = 0

        uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))

        # Never send ephemeral events to appservices that do not support it
        if service.supports_ephemeral:
            body = {"events": events, "de.sorunome.msc2409.ephemeral": ephemeral}
        else:
            body = {"events": events}

        uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id))
        try:
            await self.put_json(
                uri=uri,
                json_body={"events": events},
                args={"access_token": service.hs_token},
                uri=uri, json_body=body, args={"access_token": service.hs_token},
            )
            sent_transactions_counter.labels(service.id).inc()
            sent_events_counter.labels(service.id).inc(len(events))
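For reference, a hedged sketch (not taken from this diff) of the request `push_bulk` ends up sending when a service has opted into MSC2409: the transaction URI and the unstable `de.sorunome.msc2409.ephemeral` key are as used above, while the service URL, txn id and event payloads are invented placeholders.

```python
import urllib.parse

service_url = "https://bridge.example.com"  # hypothetical appservice URL
txn_id = 42                                 # hypothetical transaction id

body = {
    "events": [{"type": "m.room.message", "room_id": "!a:example.com"}],
    # Unstable-prefixed key, only included when service.supports_ephemeral is set.
    "de.sorunome.msc2409.ephemeral": [{"type": "m.typing", "room_id": "!a:example.com"}],
}
uri = service_url + "/transactions/%s" % urllib.parse.quote(str(txn_id))
print(uri)
print(body)
```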

@@ -49,14 +49,24 @@ This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging
from typing import List

from synapse.appservice import ApplicationServiceState
from synapse.appservice import ApplicationService, ApplicationServiceState
from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict

logger = logging.getLogger(__name__)


# Maximum number of events to provide in an AS transaction.
MAX_PERSISTENT_EVENTS_PER_TRANSACTION = 100

# Maximum number of ephemeral events to provide in an AS transaction.
MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100


class ApplicationServiceScheduler:
    """ Public facing API for this module. Does the required DI to tie the
    components together. This also serves as the "event_pool", which in this
@@ -82,8 +92,13 @@ class ApplicationServiceScheduler:
        for service in services:
            self.txn_ctrl.start_recoverer(service)

    def submit_event_for_as(self, service, event):
        self.queuer.enqueue(service, event)
    def submit_event_for_as(self, service: ApplicationService, event: EventBase):
        self.queuer.enqueue_event(service, event)

    def submit_ephemeral_events_for_as(
        self, service: ApplicationService, events: List[JsonDict]
    ):
        self.queuer.enqueue_ephemeral(service, events)


class _ServiceQueuer:
@@ -96,17 +111,15 @@ class _ServiceQueuer:

    def __init__(self, txn_ctrl, clock):
        self.queued_events = {} # dict of {service_id: [events]}
        self.queued_ephemeral = {} # dict of {service_id: [events]}

        # the appservices which currently have a transaction in flight
        self.requests_in_flight = set()
        self.txn_ctrl = txn_ctrl
        self.clock = clock

    def enqueue(self, service, event):
        self.queued_events.setdefault(service.id, []).append(event)

    def _start_background_request(self, service):
        # start a sender for this appservice if we don't already have one

        if service.id in self.requests_in_flight:
            return

@@ -114,7 +127,15 @@ class _ServiceQueuer:
            "as-sender-%s" % (service.id,), self._send_request, service
        )

    async def _send_request(self, service):
    def enqueue_event(self, service: ApplicationService, event: EventBase):
        self.queued_events.setdefault(service.id, []).append(event)
        self._start_background_request(service)

    def enqueue_ephemeral(self, service: ApplicationService, events: List[JsonDict]):
        self.queued_ephemeral.setdefault(service.id, []).extend(events)
        self._start_background_request(service)

    async def _send_request(self, service: ApplicationService):
        # sanity-check: we shouldn't get here if this service already has a sender
        # running.
        assert service.id not in self.requests_in_flight
@@ -122,11 +143,19 @@ class _ServiceQueuer:
        self.requests_in_flight.add(service.id)
        try:
            while True:
                events = self.queued_events.pop(service.id, [])
                if not events:
                all_events = self.queued_events.get(service.id, [])
                events = all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION]
                del all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION]

                all_events_ephemeral = self.queued_ephemeral.get(service.id, [])
                ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION]
                del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION]

                if not events and not ephemeral:
                    return

                try:
                    await self.txn_ctrl.send(service, events)
                    await self.txn_ctrl.send(service, events, ephemeral)
                except Exception:
                    logger.exception("AS request failed")
                finally:
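A minimal, self-contained sketch of the batching pattern introduced in the queuer: take a slice of up to the per-transaction cap and delete that slice from the shared queue in place, so anything over the cap stays queued for the next loop iteration. The queue dict and the small cap below are stand-ins for the queuer's real state and the 100-event constants above.

```python
MAX_PER_TXN = 3  # stand-in for MAX_PERSISTENT_EVENTS_PER_TRANSACTION

queued = {"as1": ["e1", "e2", "e3", "e4", "e5"]}

all_events = queued.get("as1", [])
batch = all_events[:MAX_PER_TXN]   # events that go into this transaction
del all_events[:MAX_PER_TXN]       # remove them from the shared queue in place

print(batch)          # ['e1', 'e2', 'e3']
print(queued["as1"])  # ['e4', 'e5'] -> picked up on the next loop iteration
```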
@@ -158,9 +187,16 @@ class _TransactionController:
        # for UTs
        self.RECOVERER_CLASS = _Recoverer

    async def send(self, service, events):
    async def send(
        self,
        service: ApplicationService,
        events: List[EventBase],
        ephemeral: List[JsonDict] = [],
    ):
        try:
            txn = await self.store.create_appservice_txn(service=service, events=events)
            txn = await self.store.create_appservice_txn(
                service=service, events=events, ephemeral=ephemeral
            )
            service_is_up = await self._is_service_up(service)
            if service_is_up:
                sent = await txn.send(self.as_api)
@@ -204,7 +240,7 @@ class _TransactionController:
            recoverer.recover()
        logger.info("Now %i active recoverers", len(self.recoverers))

    async def _is_service_up(self, service):
    async def _is_service_up(self, service: ApplicationService) -> bool:
        state = await self.store.get_appservice_state(service)
        return state == ApplicationServiceState.UP or state is None


@@ -160,6 +160,8 @@ def _load_appservice(hostname, as_info, config_filename):
    if as_info.get("ip_range_whitelist"):
        ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist"))

    supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False)

    return ApplicationService(
        token=as_info["as_token"],
        hostname=hostname,
@@ -168,6 +170,7 @@ def _load_appservice(hostname, as_info, config_filename):
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
        supports_ephemeral=supports_ephemeral,
        protocols=protocols,
        rate_limited=rate_limited,
        ip_range_whitelist=ip_range_whitelist,
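A hedged sketch of the `as_info` mapping that `_load_appservice` receives once a registration opts into ephemeral events: only the `de.sorunome.msc2409.push_ephemeral` key comes from this diff, the other fields are the usual (here invented) appservice registration values.

```python
# Hypothetical parsed registration; values are placeholders.
as_info = {
    "id": "my_bridge",
    "url": "https://bridge.example.com",
    "as_token": "<as token>",
    "hs_token": "<hs token>",
    "sender_localpart": "bridgebot",
    "namespaces": {"users": [{"regex": "@bridge_.*", "exclusive": True}]},
    "de.sorunome.msc2409.push_ephemeral": True,
}

# Mirrors the lookup added above; defaults to False when the key is absent.
supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False)
print(supports_ephemeral)  # True
```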

@@ -100,7 +100,7 @@ class ContentRepositoryConfig(Config):
            "media_instance_running_background_jobs",
        )

        self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
        self.max_upload_size = self.parse_size(config.get("max_upload_size", "50M"))
        self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))

@@ -242,7 +242,7 @@ class ContentRepositoryConfig(Config):

        # The largest allowed upload size in bytes
        #
        #max_upload_size: 10M
        #max_upload_size: 50M

        # Maximum number of pixels that will be thumbnailed
        #

@@ -312,6 +312,12 @@ class EventBase(metaclass=abc.ABCMeta):
        """
        return [e for e, _ in self.auth_events]

    def freeze(self):
        """'Freeze' the event dict, so it cannot be modified by accident"""

        # this will be a no-op if the event dict is already frozen.
        self._dict = freeze(self._dict)


class FrozenEvent(EventBase):
    format_version = EventFormatVersions.V1 # All events of this type are V1
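A toy illustration of the idea behind `freeze()` above: once the underlying mapping is swapped for an immutable view, accidental writes raise instead of silently mutating the event. This sketch uses `types.MappingProxyType` purely for illustration; the real implementation uses Synapse's `freeze` helper.

```python
from types import MappingProxyType

event_dict = {"type": "m.room.message", "content": {"body": "hi"}}
frozen = MappingProxyType(event_dict)  # read-only view, stand-in for freeze()

print(frozen["type"])  # reads still work
try:
    frozen["type"] = "m.room.topic"
except TypeError as exc:
    print("refused:", exc)  # writes are rejected
```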

@@ -97,32 +97,37 @@ class EventBuilder:
    def is_state(self):
        return self._state_key is not None

    async def build(self, prev_event_ids: List[str]) -> EventBase:
    async def build(
        self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]],
    ) -> EventBase:
        """Transform into a fully signed and hashed event

        Args:
            prev_event_ids: The event IDs to use as the prev events
            auth_event_ids: The event IDs to use as the auth events.
                Should normally be set to None, which will cause them to be calculated
                based on the room state at the prev_events.

        Returns:
            The signed and hashed event.
        """

        state_ids = await self._state.get_current_state_ids(
            self.room_id, prev_event_ids
        )
        auth_ids = self._auth.compute_auth_events(self, state_ids)
        if auth_event_ids is None:
            state_ids = await self._state.get_current_state_ids(
                self.room_id, prev_event_ids
            )
            auth_event_ids = self._auth.compute_auth_events(self, state_ids)

        format_version = self.room_version.event_format
        if format_version == EventFormatVersions.V1:
            # The types of auth/prev events changes between event versions.
            auth_events = await self._store.add_event_hashes(
                auth_ids
                auth_event_ids
            ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
            prev_events = await self._store.add_event_hashes(
                prev_event_ids
            ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
        else:
            auth_events = auth_ids
            auth_events = auth_event_ids
            prev_events = prev_event_ids

        old_depth = await self._store.get_max_depth_of(prev_event_ids)
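A hedged usage sketch of the new `build` signature: callers that previously passed only `prev_event_ids` now also pass `auth_event_ids`, with `None` meaning "compute them from the room state at the prev events", as the docstring above says. The builder argument and the event IDs below are placeholders, not values from this diff.

```python
from typing import List

async def build_examples(builder, prev_event_ids: List[str]) -> None:
    # Common case: auth_event_ids=None lets the builder derive the auth events
    # from the room state at the prev events.
    event = await builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None)

    # A caller that already knows the auth chain it wants can pass it explicitly.
    event = await builder.build(
        prev_event_ids=prev_event_ids,
        auth_event_ids=["$create:example.com", "$power:example.com"],  # hypothetical IDs
    )
```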

@@ -12,7 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable

from typing import Callable, Union

from synapse.events import EventBase
from synapse.events.snapshot import EventContext
@@ -44,15 +45,20 @@ class ThirdPartyEventRules:

    async def check_event_allowed(
        self, event: EventBase, context: EventContext
    ) -> bool:
    ) -> Union[bool, dict]:
        """Check if a provided event should be allowed in the given context.

        The module can return:
            * True: the event is allowed.
            * False: the event is not allowed, and should be rejected with M_FORBIDDEN.
            * a dict: replacement event data.

        Args:
            event: The event to be checked.
            context: The context of the event.

        Returns:
            True if the event should be allowed, False if not.
            The result from the ThirdPartyRules module, as above
        """
        if self.third_party_rules is None:
            return True
@@ -63,9 +69,10 @@ class ThirdPartyEventRules:
        events = await self.store.get_events(prev_state_ids.values())
        state_events = {(ev.type, ev.state_key): ev for ev in events.values()}

        # The module can modify the event slightly if it wants, but caution should be
        # exercised, and it's likely to go very wrong if applied to events received over
        # federation.
        # Ensure that the event is frozen, to make sure that the module is not tempted
        # to try to modify it. Any attempt to modify it at this point will invalidate
        # the hashes and signatures.
        event.freeze()

        return await self.third_party_rules.check_event_allowed(event, state_events)
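A hedged sketch of what a third-party rules module could do under the new contract (return `True`, `False`, or a replacement event dict). The module class, its policy, and the use of `event.get_dict()` are illustrative assumptions; only the return-value convention comes from the docstring above.

```python
from typing import Union

class ExampleRulesModule:
    """Illustrative third-party rules module (hypothetical)."""

    def __init__(self, config, http_client):
        self._config = config

    async def check_event_allowed(self, event, state_events) -> Union[bool, dict]:
        # Reject topic changes outright (False -> rejected with M_FORBIDDEN).
        if event.type == "m.room.topic":
            return False

        # Return a replacement event dict to rewrite message bodies. The event
        # itself is frozen (see event.freeze() above), so build new dicts rather
        # than mutating it.
        if event.type == "m.room.message":
            new_event = event.get_dict()  # assumed helper returning the event as a dict
            new_event["content"] = dict(event.content, body="[checked] " + event.content["body"])
            return new_event

        # Everything else is allowed unchanged.
        return True
```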


@@ -83,6 +83,9 @@ class EventValidator:
        Args:
            event (FrozenEvent): The event to validate.
        """
        if not event.is_state():
            raise SynapseError(code=400, msg="must be a state event")

        min_lifetime = event.content.get("min_lifetime")
        max_lifetime = event.content.get("max_lifetime")


@@ -99,10 +99,15 @@ class FederationServer(FederationBase):
        super().__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()

        self.device_handler = hs.get_device_handler()

        # Ensure the following handlers are loaded since they register callbacks
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        self._server_linearizer = Linearizer("fed_server")
@@ -111,7 +116,7 @@ class FederationServer(FederationBase):
        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs, "fed_txn_handler", timeout_ms=30000
        )
        ) # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)

@@ -119,10 +124,12 @@ class FederationServer(FederationBase):

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
        self._state_resp_cache = ResponseCache(
            hs, "state_resp", timeout_ms=30000
        ) # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs, "state_ids_resp", timeout_ms=30000
        )
        ) # type: ResponseCache[Tuple[str, str]]

        self._federation_metrics_domains = (
            hs.get_config().federation.federation_metrics_domains
@@ -861,7 +868,7 @@ class FederationHandlerRegistry:
        self._edu_type_to_instance = {} # type: Dict[str, str]

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, dict], Awaitable[None]]
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

@@ -188,7 +188,7 @@ class FederationRemoteSendQueue:
        for key in keys[:i]:
            del self.edus[key]

    def notify_new_events(self, current_id):
    def notify_new_events(self, max_token):
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

@@ -40,7 +40,7 @@ from synapse.metrics import (
    events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt
from synapse.types import ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure, measure_func

logger = logging.getLogger(__name__)
@@ -154,10 +154,15 @@ class FederationSender:
            self._per_destination_queues[destination] = queue
        return queue

    def notify_new_events(self, current_id: int) -> None:
    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """This gets called when we have some new events we might want to
        send out to other servers.
        """
        # We just use the minimum stream ordering and ignore the vector clock
        # component. This is safe to do as long as we *always* ignore the vector
        # clock components.
        current_id = max_token.stream

        self._last_poked_id = max(current_id, self._last_poked_id)

        if self._is_processing:

@@ -12,36 +12,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
from .search import SearchHandler


class Handlers:

    """ Deprecated. A collection of handlers.

    At some point most of the classes whose name ended "Handler" were
    accessed through this class.

    However this makes it painful to unit test the handlers and to run cut
    down versions of synapse that only use specific handlers because using a
    single handler required creating all of the handlers. So some of the
    handlers have been lifted out of the Handlers object and are now accessed
    directly through the homeserver object itself.

    Any new handlers should follow the new pattern of being accessed through
    the homeserver object and should not be added to the Handlers object.

    The remaining handlers should be moved out of the handlers object.
    """

    def __init__(self, hs):
        self.federation_handler = FederationHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)
        self.identity_handler = IdentityHandler(hs)
        self.search_handler = SearchHandler(hs)
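The hunk above removes the deprecated `Handlers` collection; elsewhere in this diff the call sites move to per-handler getters on the homeserver (for example `hs.get_federation_handler()` in `FederationServer` and `hs.get_identity_handler()` in the auth and deactivation handlers). A hedged before/after sketch, assuming `hs` is the homeserver object:

```python
def get_handlers_new_style(hs):
    # Before this change, handlers were reached via the deprecated collection, e.g.
    #   federation_handler = hs.get_handlers().federation_handler
    # After, dedicated getters on the homeserver are used, as the updated call
    # sites in this diff do.
    federation_handler = hs.get_federation_handler()
    identity_handler = hs.get_identity_handler()
    return federation_handler, identity_handler
```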

@@ -14,6 +14,7 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Optional

import synapse.state
import synapse.storage
@@ -22,6 +23,9 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.ratelimiting import Ratelimiter
from synapse.types import UserID

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer

logger = logging.getLogger(__name__)


@@ -30,11 +34,7 @@ class BaseHandler:
    Common base class for the event handlers.
    """

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer):
        """
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore() # type: synapse.storage.DataStore
        self.auth = hs.get_auth()
        self.notifier = hs.get_notifier()
@@ -56,7 +56,7 @@ class BaseHandler:
                clock=self.clock,
                rate_hz=self.hs.config.rc_admin_redaction.per_second,
                burst_count=self.hs.config.rc_admin_redaction.burst_count,
            )
            ) # type: Optional[Ratelimiter]
        else:
            self.admin_redaction_ratelimiter = None

@@ -127,15 +127,15 @@ class BaseHandler:
        if guest_access != "can_join":
            if context:
                current_state_ids = await context.get_current_state_ids()
                current_state = await self.store.get_events(
                current_state_dict = await self.store.get_events(
                    list(current_state_ids.values())
                )
                current_state = list(current_state_dict.values())
            else:
                current_state = await self.state_handler.get_current_state(
                current_state_map = await self.state_handler.get_current_state(
                    event.room_id
                )

            current_state = list(current_state.values())
                current_state = list(current_state_map.values())

            logger.info("maybe_kick_guest_users %r", current_state)
            await self.kick_guest_users(current_state)

@@ -12,16 +12,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Tuple

from synapse.types import JsonDict, UserID

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer


class AccountDataEventSource:
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()

    def get_current_key(self, direction="f"):
    def get_current_key(self, direction: str = "f") -> int:
        return self.store.get_max_account_data_stream_id()

    async def get_new_events(self, user, from_key, **kwargs):
    async def get_new_events(
        self, user: UserID, from_key: int, **kwargs
    ) -> Tuple[List[JsonDict], int]:
        user_id = user.to_string()
        last_stream_id = from_key


@@ -22,7 +22,7 @@ from typing import List

from synapse.api.errors import StoreError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils

@@ -63,15 +63,10 @@ class AccountValidityHandler:
            self._raw_from = email.utils.parseaddr(self._from_string)[1]

            # Check the renewal emails to send and send them every 30min.
            def send_emails():
                # run as a background process to make sure that the database transactions
                # have a logcontext to report to
                return run_as_background_process(
                    "send_renewals", self._send_renewal_emails
                )

            self.clock.looping_call(send_emails, 30 * 60 * 1000)
            if hs.config.run_background_tasks:
                self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

    @wrap_as_background_process("send_renewals")
    async def _send_renewal_emails(self):
        """Gets the list of users whose account is expiring in the amount of time
        configured in the ``renew_at`` parameter from the ``account_validity``

@@ -14,6 +14,7 @@
# limitations under the License.

import logging
from typing import Dict, List, Optional

from prometheus_client import Counter

@@ -21,12 +22,16 @@ from twisted.internet import defer

import synapse
from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
    event_processing_loop_counter,
    event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Collection, JsonDict, RoomStreamToken, UserID
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)
@@ -43,19 +48,22 @@ class ApplicationServicesHandler:
        self.started_scheduler = False
        self.clock = hs.get_clock()
        self.notify_appservices = hs.config.notify_appservices
        self.event_sources = hs.get_event_sources()

        self.current_max = 0
        self.is_processing = False

    async def notify_interested_services(self, current_id):
    async def notify_interested_services(self, max_token: RoomStreamToken):
        """Notifies (pushes) all application services interested in this event.

        Pushing is done asynchronously, so this method won't block for any
        prolonged length of time.

        Args:
            current_id(int): The current maximum ID.
        """
        # We just use the minimum stream ordering and ignore the vector clock
        # component. This is safe to do as long as we *always* ignore the vector
        # clock components.
        current_id = max_token.stream

        services = self.store.get_app_services()
        if not services or not self.notify_appservices:
            return
@@ -79,7 +87,7 @@ class ApplicationServicesHandler:
            if not events:
                break

            events_by_room = {}
            events_by_room = {} # type: Dict[str, List[EventBase]]
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

@@ -158,6 +166,104 @@ class ApplicationServicesHandler:
        finally:
            self.is_processing = False

    async def notify_interested_services_ephemeral(
        self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [],
    ):
        """This is called by the notifier in the background
        when a ephemeral event handled by the homeserver.

        This will determine which appservices
        are interested in the event, and submit them.

        Events will only be pushed to appservices
        that have opted into ephemeral events

        Args:
            stream_key: The stream the event came from.
            new_token: The latest stream token
            users: The user(s) involved with the event.
        """
        services = [
            service
            for service in self.store.get_app_services()
            if service.supports_ephemeral
        ]
        if not services or not self.notify_appservices:
            return
        logger.info("Checking interested services for %s" % (stream_key))
        with Measure(self.clock, "notify_interested_services_ephemeral"):
            for service in services:
                # Only handle typing if we have the latest token
                if stream_key == "typing_key" and new_token is not None:
                    events = await self._handle_typing(service, new_token)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(service, events)
                    # We don't persist the token for typing_key for performance reasons
                elif stream_key == "receipt_key":
                    events = await self._handle_receipts(service)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(service, events)
                    await self.store.set_type_stream_id_for_appservice(
                        service, "read_receipt", new_token
                    )
                elif stream_key == "presence_key":
                    events = await self._handle_presence(service, users)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(service, events)
                    await self.store.set_type_stream_id_for_appservice(
                        service, "presence", new_token
                    )

    async def _handle_typing(self, service: ApplicationService, new_token: int):
        typing_source = self.event_sources.sources["typing"]
        # Get the typing events from just before current
        typing, _ = await typing_source.get_new_events_as(
            service=service,
            # For performance reasons, we don't persist the previous
            # token in the DB and instead fetch the latest typing information
            # for appservices.
            from_key=new_token - 1,
        )
        return typing

    async def _handle_receipts(self, service: ApplicationService):
        from_key = await self.store.get_type_stream_id_for_appservice(
            service, "read_receipt"
        )
        receipts_source = self.event_sources.sources["receipt"]
        receipts, _ = await receipts_source.get_new_events_as(
            service=service, from_key=from_key
        )
        return receipts

    async def _handle_presence(
        self, service: ApplicationService, users: Collection[UserID]
    ):
        events = [] # type: List[JsonDict]
        presence_source = self.event_sources.sources["presence"]
        from_key = await self.store.get_type_stream_id_for_appservice(
            service, "presence"
        )
        for user in users:
            interested = await service.is_interested_in_presence(user, self.store)
            if not interested:
                continue
            presence_events, _ = await presence_source.get_new_events(
                user=user, service=service, from_key=from_key,
            )
            time_now = self.clock.time_msec()
            presence_events = [
                {
                    "type": "m.presence",
                    "sender": event.user_id,
                    "content": format_user_presence_state(
                        event, time_now, include_user_id=False
                    ),
                }
                for event in presence_events
            ]
            events = events + presence_events
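For orientation, a hedged sketch of how the notifier side might invoke the new hook above for each ephemeral stream. The stream keys ("typing_key", "receipt_key", "presence_key") are the ones dispatched on in the handler; the handler variable, token, and user list are illustrative placeholders.

```python
from typing import List, Optional

async def on_new_ephemeral_event(
    appservice_handler, stream_key: str, new_token: Optional[int], users: List
) -> None:
    # The handler only acts on the three ephemeral streams it knows about, and
    # only for appservices that have opted into MSC2409 ephemeral events.
    assert stream_key in ("typing_key", "receipt_key", "presence_key")
    await appservice_handler.notify_interested_services_ephemeral(
        stream_key, new_token, users
    )
```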

    async def query_user_exists(self, user_id):
        """Check if any application service knows this user_id exists.

@@ -220,7 +326,7 @@ class ApplicationServicesHandler:

    async def get_3pe_protocols(self, only_protocol=None):
        services = self.store.get_app_services()
        protocols = {}
        protocols = {} # type: Dict[str, List[JsonDict]]

        # Collect up all the individual protocol responses out of the ASes
        for s in services:

@@ -1080,7 +1080,7 @@ class AuthHandler(BaseHandler):
        if medium == "email":
            address = canonicalise_email(address)

        identity_handler = self.hs.get_handlers().identity_handler
        identity_handler = self.hs.get_identity_handler()
        result = await identity_handler.try_unbind_threepid(
            user_id, {"medium": medium, "address": address, "id_server": id_server}
        )
@@ -1122,20 +1122,22 @@ class AuthHandler(BaseHandler):
            Whether self.hash(password) == stored_hash.
        """

        def _do_validate_hash():
        def _do_validate_hash(checked_hash: bytes):
            # Normalise the Unicode in the password
            pw = unicodedata.normalize("NFKC", password)

            return bcrypt.checkpw(
                pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"),
                stored_hash,
                checked_hash,
            )

        if stored_hash:
            if not isinstance(stored_hash, bytes):
                stored_hash = stored_hash.encode("ascii")

            return await defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
            return await defer_to_thread(
                self.hs.get_reactor(), _do_validate_hash, stored_hash
            )
        else:
            return False
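The change above passes the stored hash into the thread-pool function explicitly instead of closing over it. A small self-contained sketch of the underlying bcrypt check with a pepper; the pepper value and password are made up, and the `bcrypt` package must be installed for this to run.

```python
import unicodedata

import bcrypt  # third-party dependency, as used by the handler above

pepper = "example-pepper"    # stand-in for hs.config.password_pepper
password = "correct horse"   # stand-in for the user-supplied password

# What registration would have stored:
stored_hash = bcrypt.hashpw(
    unicodedata.normalize("NFKC", password).encode("utf8") + pepper.encode("utf8"),
    bcrypt.gensalt(),
)

def do_validate_hash(checked_hash: bytes) -> bool:
    # Mirrors _do_validate_hash above: normalise, append the pepper, compare.
    pw = unicodedata.normalize("NFKC", password)
    return bcrypt.checkpw(pw.encode("utf8") + pepper.encode("utf8"), checked_hash)

print(do_validate_hash(stored_hash))  # True
```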


@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from typing import TYPE_CHECKING, Optional

from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -22,19 +22,22 @@ from synapse.types import UserID, create_requester

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer

logger = logging.getLogger(__name__)


class DeactivateAccountHandler(BaseHandler):
    """Handler which deals with deactivating user accounts."""

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.hs = hs
        self._auth_handler = hs.get_auth_handler()
        self._device_handler = hs.get_device_handler()
        self._room_member_handler = hs.get_room_member_handler()
        self._identity_handler = hs.get_handlers().identity_handler
        self._identity_handler = hs.get_identity_handler()
        self.user_directory_handler = hs.get_user_directory_handler()

        # Flag that indicates whether the process to part users from rooms is running
@@ -42,7 +45,7 @@ class DeactivateAccountHandler(BaseHandler):

        # Start the user parter loop so it can resume parting users from rooms where
        # it left off (if it has work left to do).
        if hs.config.worker_app is None:
        if hs.config.run_background_tasks:
            hs.get_reactor().callWhenRunning(self._start_user_parting)

        self._account_validity_enabled = hs.config.account_validity.enabled
@@ -137,7 +140,7 @@ class DeactivateAccountHandler(BaseHandler):

        return identity_server_supports_unbinding

    async def _reject_pending_invites_for_user(self, user_id: str):
    async def _reject_pending_invites_for_user(self, user_id: str) -> None:
        """Reject pending invites addressed to a given user ID.

        Args:

Some files were not shown because too many files have changed in this diff.