Compare commits: erikj/fixu...travis/win (105 commits)
| SHA1 |
|---|
| b1f5a25932 |
| 6b95c11e78 |
| d48061b7e6 |
| 963a60c7e7 |
| 8e7da35402 |
| 028b103ae0 |
| abb1384502 |
| 0ed1c64c83 |
| 1353fb3347 |
| b15e17ce6e |
| 8cdd2d214e |
| 3fef535ff2 |
| 62134dcc77 |
| 23eed4f72a |
| 4721177241 |
| 7879f288df |
| ccbd619b43 |
| c896030f67 |
| 4d7e53707c |
| cf69f8d59b |
| 20de685a4b |
| 8e9e6f1a0a |
| 57538eb4d9 |
| 45b35f8eae |
| 2ec257d608 |
| daeaeb2a7b |
| 7786ae7e1c |
| 22aeb78b77 |
| a9d2e40ea4 |
| 0c4f7a3d16 |
| 75b788f49f |
| 7be03d854b |
| fa91655805 |
| 0d2b75cf92 |
| ccce858c4a |
| 99c107920d |
| 1609855ff8 |
| 8f890447b0 |
| b905ae27ca |
| 1ce59d7ba0 |
| b3b793786c |
| 9c8f1a6d41 |
| 5b5280e3e5 |
| 635e3927d2 |
| a1b8897668 |
| 76b9f14c0a |
| 1eccbfb82f |
| 2f5a77efae |
| b11f5c984b |
| 27756c9fdf |
| cc5e5893fe |
| 7c169f4d2c |
| f75da9cc53 |
| 75c19bf57a |
| b924a8e1a9 |
| a8dcd686fb |
| 315b8d2032 |
| 9f47513458 |
| ef7fbdfebd |
| 9cf0ef9c70 |
| a023538822 |
| f79dbd0f61 |
| c89fea3fd1 |
| 554a92601a |
| a98cb87bee |
| 6e8af83193 |
| 805e6c9a8f |
| 3c61ddbbc9 |
| ae4c236a6d |
| 930a64b6c1 |
| 7a11c0ac4f |
| cf711ac03c |
| 700d2cc4a0 |
| 1e74b50dc6 |
| 7c2d8f1f01 |
| 118b734081 |
| 7a6186b888 |
| 452a59f887 |
| adeedb7b7c |
| 7c5fb13f7b |
| f8d57ce656 |
| 13ed84c573 |
| 4243c1f074 |
| 3239b7459c |
| c99203d98c |
| 9104a9f0d0 |
| a412a5829d |
| 7ef89b985d |
| bdf82efea5 |
| afaf2d9388 |
| 199223062a |
| 97c3d98816 |
| fa3adc896a |
| 79767a1108 |
| 4af654f0da |
| 1c7d85fdfe |
| 5a65e8a0d1 |
| 088992a484 |
| d17d931a53 |
| 334123f0cd |
| d8e81f67eb |
| 19a3d5b606 |
| 52813a8d94 |
| a5485437cf |
| e5b8a3e37f |
.github/ISSUE_TEMPLATE.md (2 changes, vendored)

````diff
@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
 
-**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
+**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
````
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (2 changes, vendored)

````diff
@@ -7,7 +7,7 @@ body:
         **THIS IS NOT A SUPPORT CHANNEL!**
         **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
 
-        If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+        If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
 
         This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.
 
````
.github/workflows/docker.yml (2 changes, vendored)

````diff
@@ -72,7 +72,7 @@ jobs:
 
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           push: true
           labels: |
````
.github/workflows/docs-pr-netlify.yaml (2 changes, vendored)

````diff
@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5
+        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
````
.github/workflows/release-artifacts.yml (8 changes, vendored)

````diff
@@ -102,7 +102,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-11]
+        os: [ubuntu-20.04, macos-12]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,9 +112,9 @@ jobs:
       exclude:
         # Don't build macos wheels on PR CI.
         - is_pr: true
-          os: "macos-11"
+          os: "macos-12"
         # Don't build aarch64 wheels on mac.
-        - os: "macos-11"
+        - os: "macos-12"
           arch: aarch64
         # Don't build aarch64 wheels on PR CI.
         - is_pr: true
@@ -130,7 +130,7 @@ jobs:
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.16.2
+        run: python -m pip install cibuildwheel==2.19.1
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
````
.github/workflows/tests.yml (22 changes, vendored)

````diff
@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter
@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
 
+          linting_readme:
+            - 'README.rst'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes
@@ -135,7 +139,7 @@ jobs:
 
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+        run: poetry run ruff check --quiet .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -269,6 +273,20 @@ jobs:
 
       - run: cargo fmt --check
 
+  # This is to detect issues with the rst file, which can otherwise cause issues
+  # when uploading packages to PyPi.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
@@ -284,6 +302,7 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
       - uses: matrix-org/done-action@v2
@@ -301,6 +320,7 @@ jobs:
             lint-clippy
             lint-clippy-nightly
             lint-rustfmt
+            lint-readme
 
 
   calculate-test-jobs:
````
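The new `lint-readme` job can be reproduced locally before pushing. A minimal sketch in Python, assuming `rstcheck` has been installed with `pip install rstcheck` as the job above does:

```python
# Local reproduction of the lint-readme CI job (sketch; assumes rstcheck
# is installed in the current environment).
import subprocess
import sys

# Mirrors: rstcheck --report-level=WARNING README.rst
result = subprocess.run(["rstcheck", "--report-level=WARNING", "README.rst"])
sys.exit(result.returncode)
```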
CHANGES.md (174 changes)

````diff
@@ -1,3 +1,177 @@
+# Synapse 1.111.0rc1 (2024-07-09)
+
+### Features
+
+- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320))
+- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337))
+- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+  by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the
+  remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388))
+- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395))
+- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400))
+- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403))
+- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406))
+
+### Bugfixes
+
+- Fix rare race which caused no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362))
+- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398))
+
+### Improved Documentation
+
+- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356))
+- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379))
+- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399))
+
+### Internal Changes
+
+- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318))
+- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
+- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367))
+- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411))
+- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390))
+- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410))
+- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393))
+
+
+
+### Updates to locked dependencies
+
+* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404))
+* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382))
+* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413))
+* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384))
+* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414))
+* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412))
+* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415))
+* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381))
+* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409))
+* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408))
+* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380))
+
+# Synapse 1.110.0 (2024-07-03)
+
+No significant changes since 1.110.0rc3.
+
+
+
+
+# Synapse 1.110.0rc3 (2024-07-02)
+
+### Bugfixes
+
+- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))
+
+### Internal Changes
+
+- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
+- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))
+
+
+
+
+# Synapse 1.110.0rc2 (2024-06-26)
+
+### Internal Changes
+
+- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
+
+
+
+
+# Synapse 1.110.0rc1 (2024-06-26)
+
+### Features
+
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
+- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
+- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
+- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
+- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
+- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
+- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
+- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
+- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
+- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
+- `register_new_matrix_user` now supports a --password-file flag, which
+  is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294))
+- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
+  This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
+- Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
+- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+
+### Bugfixes
+
+- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
+- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
+- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
+- Fix edge case in `/sync` returning the wrong the state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
+- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))
+
+### Improved Documentation
+
+- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
+- Update header in the README to visually fix the the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
+- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
+- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
+- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))
+
+### Internal Changes
+
+- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
+- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
+- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
+- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
+- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
+- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
+- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
+- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
+- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
+- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
+- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
+- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))
+
+
+
+### Updates to locked dependencies
+
+* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
+* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
+* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
+* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
+* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
+* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
+* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
+* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
+* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
+* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
+* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
+* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
+* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
+* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
+* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
+* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
+* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
+* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
+* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
+* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
+* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
+* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))
+
 # Synapse 1.109.0 (2024-06-18)
 
 ### Internal Changes
 
 - Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))
 
 
 
 
 # Synapse 1.109.0rc3 (2024-06-17)
 
 ### Bugfixes
````
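For context on the MSC3916 entries above: clients can now fetch media through authenticated `/_matrix/client/v1/media/*` endpoints instead of the old unauthenticated media API. A sketch of such a download, following the endpoint linked in the changelog; the homeserver URL, access token and media ID below are placeholders:

```python
# Sketch of downloading media via the authenticated endpoint added for
# MSC3916 / Matrix 1.11. All values below are placeholders.
import requests

homeserver = "https://matrix.example.com"  # placeholder
access_token = "syt_..."  # placeholder

resp = requests.get(
    f"{homeserver}/_matrix/client/v1/media/download/matrix.org/someMediaId",
    headers={"Authorization": f"Bearer {access_token}"},
    timeout=30,
)
resp.raise_for_status()
with open("media.bin", "wb") as f:
    f.write(resp.content)  # raw media bytes
```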
Cargo.lock (20 changes, generated)

````diff
@@ -212,9 +212,9 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 
 [[package]]
 name = "libc"
@@ -234,9 +234,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "memchr"
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "serde"
-version = "1.0.203"
+version = "1.0.204"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
+checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.203"
+version = "1.0.204"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
+checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,9 +505,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.120"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5"
 dependencies = [
  "itoa",
  "ryu",
````
README.rst (71 changes)

````diff
@@ -1,21 +1,34 @@
-=========================================================================
-Synapse |support| |development| |documentation| |license| |pypi| |python|
-=========================================================================
-
-Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
-maintained by the Matrix.org Foundation. We began rapid development in 2014,
-reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
-in earnest today.
-
-Briefly, Matrix is an open standard for communications on the internet, supporting
-federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
-Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
-<https://spec.matrix.org/>`_ describes the technical details.
+.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+  :height: 60px
+
+**Element Synapse - Matrix homeserver implementation**
+
+|support| |development| |documentation| |license| |pypi| |python|
+
+Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
+implementation, written and maintained by `Element <https://element.io>`_.
+`Matrix <https://github.com/matrix-org>`__ is the open standard for
+secure and interoperable real time communications. You can directly run
+and manage the source code in this repository, available under an AGPL
+license. There is no support provided from Element unless you have a
+subscription.
+
+Subscription alternative
+========================
+
+Alternatively, for those that need an enterprise-ready solution, Element
+Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
+ESS builds on Synapse to offer a complete Matrix-based backend including the full
+`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
+giving admins the power to easily manage an organization-wide
+deployment. It includes advanced identity management, auditing,
+moderation and data retention options as well as Long Term Support and
+SLAs. ESS can be used to support any Matrix-based frontend client.
 
 .. contents::
 
-Installing and configuration
-============================
+🛠️ Installing and configuration
+===============================
 
 The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
 `Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
@@ -105,8 +118,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
 impact to other applications will be minimal.
 
 
-Testing a new installation
-==========================
+🧪 Testing a new installation
+=============================
 
 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.
@@ -159,8 +172,20 @@ the form of::
 As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.
 
-Troubleshooting and support
-===========================
+🎯 Troubleshooting and support
+==============================
+
+🚀 Professional support
+-----------------------
+
+Enterprise quality support for Synapse including SLAs is available as part of an
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
+
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.
+
+🤝 Community support
+--------------------
 
 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see
@@ -176,8 +201,8 @@ issues for support requests, only for bug reports and feature requests.
 .. |docs| replace:: ``docs``
 .. _docs: docs
 
-Identity Servers
-================
+🪪 Identity Servers
+===================
 
 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
@@ -206,8 +231,8 @@ an email address with your account, or send an invite to another user via their
 email address.
 
 
-Development
-===========
+🛠️ Development
+==============
 
 We welcome contributions to Synapse from the community!
 The best place to get started is our
@@ -225,8 +250,8 @@ Alongside all that, join our developer community on Matrix:
 `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
 
 
-.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
-    :alt: (get support on #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
+    :alt: (get community support in #synapse:matrix.org)
     :target: https://matrix.to/#/#synapse:matrix.org
 
 .. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
````
changelog.d (deleted entries)

````diff
@@ -1,2 +0,0 @@
-Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
-by adding a federation /download endpoint (#17172).
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1 +0,0 @@
-Fix searching for users with their exact localpart whose ID includes a hyphen.
@@ -1 +0,0 @@
-Improve ratelimiting in Synapse (#17256).
@@ -1 +0,0 @@
-Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
@@ -1 +0,0 @@
-Add debug logging for when room keys are uploaded, including whether they are replacing other room keys.
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
@@ -1 +0,0 @@
-Handle OTK uploads off master.
@@ -1 +0,0 @@
-Fix wrong retention policy being used when filtering events.
@@ -1 +0,0 @@
-Don't try and resync devices for remote users whose servers are marked as down.
@@ -1 +0,0 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers.
@@ -1 +0,0 @@
-Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1 +0,0 @@
-Re-organize Pydantic models and types used in handlers.
@@ -1 +0,0 @@
-Include user membership in events served to clients, per MSC4115.
@@ -1 +0,0 @@
-Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.
@@ -1 +0,0 @@
-Fix edge case in `/sync` returning the wrong the state when using sharded event persisters.
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
@@ -1 +0,0 @@
-Bump `mypy` from 1.8.0 to 1.9.0.
@@ -1 +0,0 @@
-Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
````
changelog.d/17425.misc (1 change, new file)

````diff
@@ -0,0 +1 @@
+Fix Docker image not building locally in some Windows environments.
````
debian/changelog (37 changes, vendored)

````diff
@@ -1,3 +1,40 @@
+matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.111.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Jul 2024 09:49:25 +0000
+
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
+matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 18:14:48 +0200
+
+matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
+
+  * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
+  * New Synapse release 1.110.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 14:07:56 +0200
+
 matrix-synapse-py3 (1.109.0) stable; urgency=medium
 
   * New synapse release 1.109.0.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 09:45:15 +0000
 
 matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
 
   * New synapse release 1.109.0rc3.
````
debian/register_new_matrix_user.ronn (11 changes, vendored)

````diff
@@ -31,8 +31,12 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
   Local part of the new user. Will prompt if omitted.
 
 * `-p`, `--password`:
-  New password for user. Will prompt if omitted. Supplying the password
-  on the command line is not recommended. Use the STDIN instead.
+  New password for user. Will prompt if this option and `--password-file` are omitted.
+  Supplying the password on the command line is not recommended.
+
+* `--password-file`:
+  File containing the new password for user. If set, overrides `--password`.
+  This is a more secure alternative to specifying the password on the command line.
 
 * `-a`, `--admin`:
   Register new user as an admin. Will prompt if omitted.
@@ -44,6 +48,9 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
   Shared secret as defined in server config file. This is an optional
   parameter as it can be also supplied via the YAML file.
 
+* `--exists-ok`:
+  Do not fail if the user already exists. The user account will be not updated in this case.
+
 * `server_url`:
   URL of the home server. Defaults to 'https://localhost:8448'.
````
````diff
@@ -179,11 +179,13 @@ RUN \
     libicu72 \
     libssl-dev \
     openssl \
+    dos2unix \
   && rm -rf /var/lib/apt/lists/*
 
 COPY --from=builder /install /usr/local
 COPY ./docker/start.py /start.py
 COPY ./docker/conf /conf
+RUN dos2unix /start.py
 
 EXPOSE 8008/tcp 8009/tcp 8448/tcp
````
````diff
@@ -73,6 +73,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
         curl \
         debhelper \
         devscripts \
+        # Required for building cffi from source.
+        libffi-dev \
         libsystemd-dev \
         lsb-release \
         pkg-config \
````
````diff
@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
 cp -aT /synapse/source /synapse/build
 cd /synapse/build
 
+# Delete any existing `.so` files to ensure a clean build.
+rm -f /synapse/build/synapse/*.so
+
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the
````
````diff
@@ -176,7 +176,6 @@ app_service_config_files:
 {% endif %}
 
 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
-expire_access_token: False
 
 ## Signing Keys ##
 
````
````diff
@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
             "^/_matrix/media/",
             "^/_synapse/admin/v1/purge_media_cache$",
@@ -125,6 +125,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/user/.*/media.*$",
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
+            "^/_matrix/client/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
````
````diff
@@ -1,21 +1,17 @@
 # Experimental Features API
 
 This API allows a server administrator to enable or disable some experimental features on a per-user
-basis. The currently supported features are:
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
-presence state enabled
-- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
-for another client
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys.
-
+basis. The currently supported features are:
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
 
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
 
 ## Enabling/Disabling Features
 
-This API allows a server administrator to enable experimental features for a given user. The request must
+This API allows a server administrator to enable experimental features for a given user. The request must
 provide a body containing the user id and listing the features to enable/disable in the following format:
 ```json
 {
@@ -35,7 +31,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
 ```
 
 ## Listing Enabled Features
-
+
 To list which features are enabled/disabled for a given user send a request to the following API:
 
 ```
@@ -52,4 +48,4 @@ user like so:
    "msc3967": false
   }
 }
-```
+```
````
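A sketch of calling this admin endpoint from Python. The server URL, admin token and user ID are placeholders, and the request-body layout follows the format described in the documentation above; treat the details as assumptions rather than a definitive client:

```python
# Sketch: toggle a per-user experimental feature via the admin API above.
# homeserver, admin_token and user_id are placeholders.
import requests

homeserver = "https://matrix.example.com"  # placeholder
admin_token = "syt_admin_..."  # placeholder
user_id = "@alice:example.com"  # placeholder

resp = requests.put(
    f"{homeserver}/_synapse/admin/v1/experimental_features/{user_id}",
    headers={"Authorization": f"Bearer {admin_token}"},
    # Assumed body shape, matching the documented enable/disable format.
    json={"features": {"msc3881": True}},
    timeout=30,
)
resp.raise_for_status()
```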
````diff
@@ -36,6 +36,10 @@ The following query parameters are available:
   - the room's name,
   - the local part of the room's canonical alias, or
   - the complete (local and server part) room's id (case sensitive).
+* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
+  the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
+* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
+  the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.
 
 Defaults to no filtering.
 
````
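A sketch of exercising the new filters against the List Room admin API; the server URL and admin token are placeholders:

```python
# Sketch: list rooms that are both public and empty using the new
# public_rooms / empty_rooms filters. Placeholders throughout.
import requests

homeserver = "https://matrix.example.com"  # placeholder
admin_token = "syt_admin_..."  # placeholder

resp = requests.get(
    f"{homeserver}/_synapse/admin/v1/rooms",
    headers={"Authorization": f"Bearer {admin_token}"},
    params={"public_rooms": "true", "empty_rooms": "true"},
    timeout=30,
)
resp.raise_for_status()
for room in resp.json().get("rooms", []):
    print(room["room_id"], room.get("joined_members"))
```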
````diff
@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.
 
-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.
 
 ### How do I know what to call the changelog file before I create the PR?
````
````diff
@@ -117,6 +117,19 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).
 
+# Upgrading to v1.111.0
+
+## New worker endpoints for authenticated client media
+
+[Media repository workers](./workers.md#synapseappmedia_repository) handling
+Media APIs can now handle the following endpoint pattern:
+
+```
+^/_matrix/client/v1/media/.*$
+```
+
+Please update your reverse proxy configuration.
+
 # Upgrading to v1.106.0
 
 ## Minimum supported Rust version
````
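One way to check that a reverse proxy rule covers the new pattern is to exercise the regex from the upgrade note against candidate request paths; a small sketch:

```python
# Sketch: verify which request paths the new media-worker rule must match.
import re

pattern = re.compile(r"^/_matrix/client/v1/media/.*$")

paths = [
    "/_matrix/client/v1/media/download/matrix.org/abc",  # new: media worker
    "/_matrix/media/v3/download/matrix.org/abc",         # covered by existing rules
    "/_matrix/client/v3/sync",                           # not a media path
]
for p in paths:
    print(p, "->", bool(pattern.match(p)))
```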
````diff
@@ -1759,8 +1759,9 @@ rc_3pid_validation:
 ### `rc_invites`
 
 This option sets ratelimiting how often invites can be sent in a room or to a
-specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
+defaults to `per_second: 0.3`, `burst_count: 10`.
 
 Client requests that invite user(s) when [creating a
 room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
@@ -1975,9 +1976,10 @@ This will not prevent the listed domains from accessing media themselves.
 It simply prevents users on this server from downloading media originating
 from the listed servers.
 
-This will have no effect on media originating from the local server.
-This only affects media downloaded from other Matrix servers, to
-block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+This will have no effect on media originating from the local server. This only
+affects media downloaded from other Matrix servers, to control URL previews see
+[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+[`url_preview_url_blacklist`](#url_preview_url_blacklist).
 
 Defaults to an empty list (nothing blocked).
 
@@ -2129,12 +2131,14 @@ url_preview_ip_range_whitelist:
 ---
 ### `url_preview_url_blacklist`
 
-Optional list of URL matches that the URL preview spider is
-denied from accessing. You should use `url_preview_ip_range_blacklist`
-in preference to this, otherwise someone could define a public DNS
-entry that points to a private IP address and circumvent the blacklist.
-This is more useful if you know there is an entire shape of URL that
-you know that will never want synapse to try to spider.
+Optional list of URL matches that the URL preview spider is denied from
+accessing. This is a usability feature, not a security one. You should use
+`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
+define a public DNS entry that points to a private IP address and circumvent
+the blacklist. Applications that perform redirects or serve different content
+when detecting that Synapse is accessing them can also bypass the blacklist.
+This is more useful if you know there is an entire shape of URL that you know
+that you do not want Synapse to preview.
 
 Each list entry is a dictionary of url component attributes as returned
 by urlparse.urlsplit as applied to the absolute form of the URL. See
@@ -2718,7 +2722,7 @@ Example configuration:
 session_lifetime: 24h
 ```
 ---
-### `refresh_access_token_lifetime`
+### `refreshable_access_token_lifetime`
 
 Time that an access token remains valid for, if the session is using refresh tokens.
 
@@ -3806,7 +3810,8 @@ This setting defines options related to the user directory.
 This option has the following sub-options:
 * `enabled`: Defines whether users can search the user directory. If false then
   empty responses are returned to all queries. Defaults to true.
-* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
+* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
+  If set to true, will return all users known to the homeserver matching the search query.
   If false, search results will only contain users
   visible in public rooms and users sharing a room with the requester.
   Defaults to false.
@@ -4150,7 +4155,7 @@ By default, no room is excluded.
 Example configuration:
 ```yaml
 exclude_rooms_from_sync:
-  - !foo:example.com
+  - "!foo:example.com"
 ```
 
 ---
````
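As the `url_preview_url_blacklist` text above notes, each entry is a dictionary of URL component attributes as returned by `urlparse.urlsplit`. An illustrative sketch of that matching idea (this is not Synapse's actual matcher, and the entry shown is hypothetical):

```python
# Illustrative sketch of matching a url_preview_url_blacklist-style entry
# against urlsplit components; not Synapse's actual matching code.
from urllib.parse import urlsplit

entry = {"scheme": "http", "netloc": "example.com"}  # hypothetical entry
parts = urlsplit("http://example.com/some/page")

# An entry matches when every component it specifies equals the URL's component.
matches = all(getattr(parts, key) == value for key, value in entry.items())
print(matches)  # True: both specified components match
```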
````diff
@@ -62,6 +62,6 @@ following documentation:
 
 ## Reporting a security vulnerability
 
-If you've found a security issue in Synapse or any other Matrix.org Foundation
-project, please report it to us in accordance with our [Security Disclosure
-Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
+If you've found a security issue in Synapse or any other Element project,
+please report it to us in accordance with our [Security Disclosure
+Policy](https://element.io/security/security-disclosure-policy). Thank you!
````
````diff
@@ -739,6 +739,7 @@ An example for a federation sender instance:
 Handles the media repository. It can handle all endpoints starting with:
 
     /_matrix/media/
+    /_matrix/client/v1/media/
 
 ... and the following regular expressions matching media-specific administration APIs:
 
````
mypy.ini (3 changes)

````diff
@@ -96,3 +96,6 @@ ignore_missing_imports = True
 # https://github.com/twisted/treq/pull/366
 [mypy-treq.*]
 ignore_missing_imports = True
+
+[mypy-multipart.*]
+ignore_missing_imports = True
````
poetry.lock (871 changes, generated)

File diff suppressed because it is too large.
````diff
@@ -43,6 +43,7 @@ target-version = ['py38', 'py39', 'py310', 'py311']
 [tool.ruff]
 line-length = 88
 
+[tool.ruff.lint]
 # See https://beta.ruff.rs/docs/rules/#error-e
 # for error codes. The ones we ignore are:
 # E501: Line too long (black enforces this for us)
@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.109.0rc3"
+version = "1.111.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -224,6 +225,8 @@ pydantic = ">=1.7.4, <3"
 # needed.
 setuptools_rust = ">=1.3"
 
+# This is used for parsing multipart responses
+python-multipart = ">=0.0.9"
 
 # Optional Dependencies
 # ---------------------
@@ -319,7 +322,7 @@ all = [
 # This helps prevents merge conflicts when running a batch of dependabot updates.
 isort = ">=5.10.1"
 black = ">=22.7.0"
-ruff = "0.3.7"
+ruff = "0.5.0"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
````
````diff
@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"
 
 # Catch any common programming mistakes in Python code.
 # --quiet suppresses the update check.
-ruff --quiet --fix "${files[@]}"
+ruff check --quiet --fix "${files[@]}"
 
 # Catch any common programming mistakes in Rust code.
 #
````
````diff
@@ -70,6 +70,7 @@ def cli() -> None:
         pip install -e .[dev]
 
     - A checkout of the sytest repository at ../sytest
+    - A checkout of the complement repository at ../complement
 
     Then to use:
 
@@ -112,10 +113,12 @@ def _prepare() -> None:
     # Make sure we're in a git repo.
     synapse_repo = get_repo_and_check_clean_checkout()
     sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")
 
     click.secho("Updating Synapse and Sytest git repos...")
     synapse_repo.remote().fetch()
     sytest_repo.remote().fetch()
+    complement_repo.remote().fetch()
 
     # Get the current version and AST from root Synapse module.
     current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
         "Which branch should the release be based on?", default=default
     )
 
-    for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
+    for repo_name, repo in {
+        "synapse": synapse_repo,
+        "sytest": sytest_repo,
+        "complement": complement_repo,
+    }.items():
+        # Special case for Complement: `develop` maps to `main`
+        if repo_name == "complement" and branch_name == "develop":
+            branch_name = "main"
+
         base_branch = find_ref(repo, branch_name)
         if not base_branch:
             print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
     if click.confirm("Push new SyTest branch?", default=True):
         sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)
 
+    # Same for Complement
+    if click.confirm("Push new Complement branch?", default=True):
+        complement_repo.git.push(
+            "-u", complement_repo.remote().name, release_branch_name
+        )
+
     # Switch to the release branch and ensure it's up to date.
     synapse_repo.git.checkout(release_branch_name)
     update_branch(synapse_repo)
@@ -630,6 +647,9 @@ def _merge_back() -> None:
     else:
         # Full release
         sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+        complement_repo = get_repo_and_check_clean_checkout(
+            "../complement", "complement"
+        )
 
         if click.confirm(f"Merge {branch_name} → master?", default=True):
             _merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +663,9 @@ def _merge_back() -> None:
     if click.confirm("On SyTest, merge master → develop?", default=True):
         _merge_into(sytest_repo, "master", "develop")
 
+    if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
+        _merge_into(complement_repo, branch_name, "main")
+
 
 @cli.command()
 def announce() -> None:
````
````diff
@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")
 
 
 class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore
 
     def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
         super().__init__(config.server.server_name, config=config)
````
@@ -52,6 +52,7 @@ def request_registration(
    user_type: Optional[str] = None,
    _print: Callable[[str], None] = print,
    exit: Callable[[int], None] = sys.exit,
    exists_ok: bool = False,
) -> None:
    url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

@@ -97,6 +98,10 @@ def request_registration(
    r = requests.post(url, json=data)

    if r.status_code != 200:
        response = r.json()
        if exists_ok and response["errcode"] == "M_USER_IN_USE":
            _print("User already exists. Skipping.")
            return
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:

@@ -115,6 +120,7 @@ def register_new_user(
    shared_secret: str,
    admin: Optional[bool],
    user_type: Optional[str],
    exists_ok: bool = False,
) -> None:
    if not user:
        try:

@@ -154,7 +160,13 @@ def register_new_user(
        admin = False

    request_registration(
        user, password, server_location, shared_secret, bool(admin), user_type
        user,
        password,
        server_location,
        shared_secret,
        bool(admin),
        user_type,
        exists_ok=exists_ok,
    )


@@ -174,10 +186,22 @@ def main() -> None:
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "--exists-ok",
        action="store_true",
        help="Do not fail if user already exists.",
    )
    password_group = parser.add_mutually_exclusive_group()
    password_group.add_argument(
        "-p",
        "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
        help="New password for user. Will prompt for a password if "
        "this flag and `--password-file` are both omitted.",
    )
    password_group.add_argument(
        "--password-file",
        default=None,
        help="File containing the new password for user. If set, will override `--password`.",
    )
    parser.add_argument(
        "-t",

@@ -185,6 +209,7 @@ def main() -> None:
        default=None,
        help="User type as specified in synapse.api.constants.UserTypes",
    )

    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",

@@ -247,6 +272,11 @@ def main() -> None:
        print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
        sys.exit(1)

    if args.password_file:
        password = _read_file(args.password_file, "password-file").strip()
    else:
        password = args.password

    if args.server_url:
        server_url = args.server_url
    elif config is not None:

@@ -270,7 +300,13 @@ def main() -> None:
        admin = args.admin

    register_new_user(
        args.user, args.password, server_url, secret, admin, args.user_type
        args.user,
        password,
        server_url,
        secret,
        admin,
        args.user_type,
        exists_ok=args.exists_ok,
    )
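A hedged usage sketch of the new `--exists-ok` behaviour, calling `register_new_user` directly; the server URL and shared secret are placeholders. With `exists_ok=True`, an `M_USER_IN_USE` response prints "User already exists. Skipping." and returns instead of exiting non-zero.

```python
# Placeholder homeserver URL and registration shared secret; substitute your own.
register_new_user(
    "alice",
    "correct-horse-battery-staple",
    "http://localhost:8008",
    "registration_shared_secret",
    admin=False,
    user_type=None,
    exists_ok=True,  # tolerate an already-registered `alice`
)
```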
@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore # type: ignore [assignment]
    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig):
        super().__init__(
@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
from typing import Optional, Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from typing_extensions import Protocol

@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"

@@ -87,6 +90,19 @@ class Auth(Protocol):
            AuthError if access is denied for the user in the access token
        """

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        """Like `get_user_by_req`, except also checks if the user has access to
        the experimental feature. If they don't returns a 404 unrecognized
        request.
        """

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ) -> None:
@@ -28,6 +28,7 @@ from synapse.api.errors import (
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span

@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
from .base import BaseAuth

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                    parent_span.set_tag("appservice_id", requester.app_service.id)
            return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If its globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
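A sketch of how a REST servlet would call the new auth method; only the `ExperimentalFeature` import is shown in the hunks above, so the specific member used here is an assumption for illustration.

```python
# Hypothetical servlet handler; `MSC3881` is an assumed ExperimentalFeature member.
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
    # Behaves like get_user_by_req, but responds with a 404 "unrecognised
    # request" unless the caller has the feature enabled (or the feature is
    # globally enabled, in which case genuine auth errors propagate).
    requester = await self.auth.get_user_by_req_experimental_feature(
        request, ExperimentalFeature.MSC3881
    )
    return 200, {"user": requester.user.to_string()}
```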
@@ -40,6 +40,7 @@ from synapse.api.errors import (
    OAuthInsufficientScopeError,
    StoreError,
    SynapseError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable

@@ -48,6 +49,7 @@ from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -143,6 +145,18 @@ class MSC3861DelegatedAuth(BaseAuth):
        # metadata.validate_introspection_endpoint()
        return metadata

    async def _introspection_endpoint(self) -> str:
        """
        Returns the introspection endpoint of the issuer

        It uses the config option if set, otherwise it will use OIDC discovery to get it
        """
        if self._config.introspection_endpoint is not None:
            return self._config.introspection_endpoint

        metadata = await self._load_metadata()
        return metadata.get("introspection_endpoint")

    async def _introspect_token(self, token: str) -> IntrospectionToken:
        """
        Send a token to the introspection endpoint and returns the introspection response

@@ -159,8 +173,7 @@ class MSC3861DelegatedAuth(BaseAuth):
        Returns:
            The introspection response
        """
        metadata = await self._issuer_metadata.get()
        introspection_endpoint = metadata.get("introspection_endpoint")
        introspection_endpoint = await self._introspection_endpoint()
        raw_headers: Dict[str, str] = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": str(self._http_client.user_agent, "utf-8"),

@@ -245,6 +258,32 @@ class MSC3861DelegatedAuth(BaseAuth):

        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If its globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    async def get_user_by_access_token(
        self,
        token: str,
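The new `_introspection_endpoint` helper is a straightforward override-or-discover lookup. The same pattern in isolation, with `load_metadata` standing in for OIDC discovery (all names here are illustrative):

```python
from typing import Awaitable, Callable, Mapping, Optional

async def resolve_introspection_endpoint(
    configured: Optional[str],
    load_metadata: Callable[[], Awaitable[Mapping[str, str]]],
) -> Optional[str]:
    # An explicitly configured endpoint always wins over discovery.
    if configured is not None:
        return configured
    metadata = await load_metadata()
    return metadata.get("introspection_endpoint")
```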
@@ -128,9 +128,13 @@ class EventTypes:
    SpaceParent: Final = "m.space.parent"

    Reaction: Final = "m.reaction"
    Sticker: Final = "m.sticker"
    LiveLocationShareStart: Final = "m.beacon_info"

    CallInvite: Final = "m.call.invite"

    PollStart: Final = "m.poll.start"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"
@@ -130,7 +130,8 @@ class Ratelimiter:
                Overrides the value set during instantiation if set.
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            update: Whether to count this check as performing the action. If the action
                cannot be performed, the user's action count is not incremented at all.
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
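These documented semantics matter for the media ratelimiting later in this diff: a probe with `update=False` consumes nothing, and an `n_actions` charge is all-or-nothing. A sketch, with `limiter` standing for any `Ratelimiter` instance and the sizes purely illustrative:

```python
# Probe: is there at least 1 MiB of budget left? Nothing is deducted here.
allowed, _ = await limiter.can_do_action(
    requester=None, key=ip_address, n_actions=1024 * 1024, update=False
)
if allowed:
    # Charge the real cost once known; if the budget cannot cover all
    # n_actions, the count is left untouched rather than partially spent.
    await limiter.can_do_action(requester=None, key=ip_address, n_actions=file_size)
```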
@@ -110,7 +110,7 @@ class AdminCmdStore(


class AdminCmdServer(HomeServer):
    DATASTORE_CLASS = AdminCmdStore # type: ignore
    DATASTORE_CLASS = AdminCmdStore


async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
@@ -74,6 +74,9 @@ from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.experimental_features import (
    ExperimentalFeaturesStore,
)
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore

@@ -155,6 +158,7 @@ class GenericWorkerStore(
    LockStore,
    SessionStore,
    TaskSchedulerWorkerStore,
    ExperimentalFeaturesStore,
):
    # Properties that multiple storage classes define. Tell mypy what the
    # expected type is.

@@ -163,7 +167,7 @@ class GenericWorkerStore(


class GenericWorkerServer(HomeServer):
    DATASTORE_CLASS = GenericWorkerStore # type: ignore
    DATASTORE_CLASS = GenericWorkerStore

    def _listen_http(self, listener_config: ListenerConfig) -> None:
        assert listener_config.http_options is not None
@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:


class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore # type: ignore
    DATASTORE_CLASS = DataStore

    def _listener_http(
        self,
@@ -140,6 +140,12 @@ class MSC3861:
        ("experimental", "msc3861", "client_auth_method"),
    )

    introspection_endpoint: Optional[str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)),
    )
    """The URL of the introspection endpoint used to validate access tokens."""

    account_management_url: Optional[str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)),

@@ -433,9 +439,12 @@ class ExperimentalConfig(Config):
            ("experimental", "msc4108_delegation_endpoint"),
        )

        self.msc3916_authenticated_media_enabled = experimental.get(
            "msc3916_authenticated_media_enabled", False
        self.msc3823_account_suspension = experimental.get(
            "msc3823_account_suspension", False
        )

        # MSC4151: Report room API (Client-Server API)
        self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)

        # MSC4156: Migrate server_name to via
        self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)
@@ -836,3 +836,21 @@ def maybe_upsert_event_field(
            del container[key]

    return upsert_okay


def strip_event(event: EventBase) -> JsonDict:
    """
    Used for "stripped state" events which provide a simplified view of the state of a
    room intended to help a potential joiner identify the room (relevant when the user
    is invited or knocked).

    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
    properties present.
    """

    return {
        "type": event.type,
        "state_key": event.state_key,
        "content": event.content,
        "sender": event.sender,
    }
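What `strip_event` yields for, say, an invite membership event (values illustrative):

```python
stripped = strip_event(invite_event)
# {
#     "type": "m.room.member",
#     "state_key": "@alice:example.org",
#     "content": {"membership": "invite"},
#     "sender": "@bob:example.org",
# }
```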
@@ -1871,6 +1871,52 @@ class FederationClient(FederationBase):

        return filtered_statuses, filtered_failures

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Union[
        Tuple[int, Dict[bytes, List[bytes]], bytes],
        Tuple[int, Dict[bytes, List[bytes]]],
    ]:
        try:
            return await self.transport_layer.federation_download_media(
                destination,
                media_id,
                output_stream=output_stream,
                max_size=max_size,
                max_timeout_ms=max_timeout_ms,
                download_ratelimiter=download_ratelimiter,
                ip_address=ip_address,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fallback to the _matrix/media/v3/download endpoint. Otherwise, consider it a legitimate error
            # and raise.
            if not is_unknown_endpoint(e):
                raise

        logger.debug(
            "Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
            destination,
            media_id,
        )

        return await self.transport_layer.download_media_v3(
            destination,
            media_id,
            output_stream=output_stream,
            max_size=max_size,
            max_timeout_ms=max_timeout_ms,
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

    async def download_media(
        self,
        destination: str,
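The fallback in `federation_download_media` follows Synapse's usual shape for probing newer federation endpoints: only an "unknown endpoint" error triggers the legacy path. Reduced to a sketch, with `fetch_new`/`fetch_old` as placeholders:

```python
try:
    return await fetch_new()  # _matrix/federation/v1/media/download
except HttpResponseException as e:
    if not is_unknown_endpoint(e):
        raise  # the remote supports the endpoint; this is a real error
return await fetch_old()  # _matrix/media/v3/download
```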
@@ -21,6 +21,7 @@
#
import datetime
import logging
from collections import OrderedDict
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type

@@ -68,6 +69,10 @@ sent_edus_by_type = Counter(
# If the retry interval is larger than this then we enter "catchup" mode
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000

# Limit how many presence states we add to each presence EDU, to ensure that
# they are bounded in size.
MAX_PRESENCE_STATES_PER_EDU = 50


class PerDestinationQueue:
    """

@@ -144,7 +149,7 @@ class PerDestinationQueue:

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence: Dict[str, UserPresenceState] = {}
        self._pending_presence: OrderedDict[str, UserPresenceState] = OrderedDict()

        # List of room_id -> receipt_type -> user_id -> receipt_dict,
        #

@@ -333,12 +338,11 @@ class PerDestinationQueue:
                # not caught up yet
                return

        pending_pdus = []
        while True:
            self._new_data_to_send = False

            async with _TransactionQueueManager(self) as (
                pending_pdus,
                pending_pdus, # noqa: F811
                pending_edus,
            ):
                if not pending_pdus and not pending_edus:

@@ -399,7 +403,7 @@ class PerDestinationQueue:
        # through another mechanism, because this is all volatile!
        self._pending_edus = []
        self._pending_edus_keyed = {}
        self._pending_presence = {}
        self._pending_presence.clear()
        self._pending_receipt_edus = []

        self._start_catching_up()

@@ -721,22 +725,26 @@ class _TransactionQueueManager:

        # Add presence EDU.
        if self.queue._pending_presence:
            # Only send max 50 presence entries in the EDU, to bound the amount
            # of data we're sending.
            presence_to_add: List[JsonDict] = []
            while (
                self.queue._pending_presence
                and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU
            ):
                _, presence = self.queue._pending_presence.popitem(last=False)
                presence_to_add.append(
                    format_user_presence_state(presence, self.queue._clock.time_msec())
                )

            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type=EduTypes.PRESENCE,
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                    content={"push": presence_to_add},
                )
            )
            self.queue._pending_presence = {}

        # Add read receipt EDUs.
        pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
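The switch to `OrderedDict` is what makes the 50-state cap fair: `popitem(last=False)` drains the oldest queued presence first, and anything over the cap stays queued for the next EDU. In miniature:

```python
from collections import OrderedDict

MAX_PRESENCE_STATES_PER_EDU = 50

pending = OrderedDict((f"@user{i}:example.org", i) for i in range(120))
batch = []
while pending and len(batch) < MAX_PRESENCE_STATES_PER_EDU:
    _, state = pending.popitem(last=False)  # FIFO: oldest entry first
    batch.append(state)

assert len(batch) == 50 and len(pending) == 70  # the rest go in later EDUs
```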
@@ -824,7 +824,6 @@ class TransportLayerClient:
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/r0/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,

@@ -852,7 +851,6 @@ class TransportLayerClient:
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/v3/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,

@@ -873,6 +871,29 @@ class TransportLayerClient:
            ip_address=ip_address,
        )

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
        path = f"/_matrix/federation/v1/media/download/{media_id}"
        return await self.client.federation_get_file(
            destination,
            path,
            output_stream=output_stream,
            max_size=max_size,
            args={
                "timeout_ms": str(max_timeout_ms),
            },
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )


def _create_path(federation_prefix: str, path: str, *args: str) -> str:
    """
@@ -19,7 +19,6 @@
# [This file includes modifications made by New Vector Limited]
#
#
import inspect
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type

@@ -33,8 +32,9 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
    FEDERATION_SERVLET_CLASSES,
    FederationAccountStatusServlet,
    FederationMediaDownloadServlet,
    FederationMediaThumbnailServlet,
    FederationUnstableClientKeysClaimServlet,
    FederationUnstableMediaDownloadServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (

@@ -317,26 +317,11 @@ def register_servlets(
        ):
            continue

        if servletclass == FederationUnstableMediaDownloadServlet:
            if (
                not hs.config.server.enable_media_repo
                or not hs.config.experimental.msc3916_authenticated_media_enabled
            ):
                continue

            # don't load the endpoint if the storage provider is incompatible
            media_repo = hs.get_media_repository()
            load_download_endpoint = True
            for provider in media_repo.media_storage.storage_providers:
                signature = inspect.signature(provider.backend.fetch)
                if "federation" not in signature.parameters:
                    logger.warning(
                        f"Federation media `/download` endpoint will not be enabled as storage provider {provider.backend} is not compatible with this endpoint."
                    )
                    load_download_endpoint = False
                    break

            if not load_download_endpoint:
        if (
            servletclass == FederationMediaDownloadServlet
            or servletclass == FederationMediaThumbnailServlet
        ):
            if not hs.config.server.enable_media_repo:
                continue

        servletclass(
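The removed block gated the endpoint on storage providers whose `fetch` method had grown a `federation` parameter, probed at runtime with `inspect.signature`. The technique in isolation, with a stand-in function:

```python
import inspect

def fetch(path: str, file_info: object, federation: bool = False) -> None:
    """Stand-in for a storage provider backend's fetch method."""

# True only if the callable accepts a `federation` parameter.
compatible = "federation" in inspect.signature(fetch).parameters
assert compatible
```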
@@ -362,7 +362,9 @@ class BaseFederationServlet:
                        return None
                    if (
                        func.__self__.__class__.__name__ # type: ignore
                        == "FederationUnstableMediaDownloadServlet"
                        == "FederationMediaDownloadServlet"
                        or func.__self__.__class__.__name__ # type: ignore
                        == "FederationMediaThumbnailServlet"
                    ):
                        response = await func(
                            origin, content, request, *args, **kwargs

@@ -374,7 +376,9 @@ class BaseFederationServlet:
                else:
                    if (
                        func.__self__.__class__.__name__ # type: ignore
                        == "FederationUnstableMediaDownloadServlet"
                        == "FederationMediaDownloadServlet"
                        or func.__self__.__class__.__name__ # type: ignore
                        == "FederationMediaThumbnailServlet"
                    ):
                        response = await func(
                            origin, content, request, *args, **kwargs
@@ -46,11 +46,13 @@ from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer,
    parse_integer_from_args,
    parse_string,
    parse_string_from_args,
    parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter

@@ -790,15 +792,14 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
        return 200, {"account_statuses": statuses, "failures": failures}


class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
class FederationMediaDownloadServlet(BaseFederationServerServlet):
    """
    Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
    a multipart/form-data response consisting of a JSON object and the requested media
    a multipart/mixed response consisting of a JSON object and the requested media
    item. This endpoint only returns local media.
    """

    PATH = "/media/download/(?P<media_id>[^/]*)"
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
    RATELIMIT = True

    def __init__(

@@ -827,6 +828,59 @@ class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
        )


class FederationMediaThumbnailServlet(BaseFederationServerServlet):
    """
    Implementation of new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
    a multipart/mixed response consisting of a JSON object and the requested media
    item. This endpoint only returns local media.
    """

    PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
    RATELIMIT = True

    def __init__(
        self,
        hs: "HomeServer",
        ratelimiter: FederationRateLimiter,
        authenticator: Authenticator,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.media_repo = self.hs.get_media_repository()
        self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
        self.thumbnail_provider = ThumbnailProvider(
            hs, self.media_repo, self.media_repo.media_storage
        )

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        request: SynapseRequest,
        media_id: str,
    ) -> None:

        width = parse_integer(request, "width", required=True)
        height = parse_integer(request, "height", required=True)
        method = parse_string(request, "method", "scale")
        # TODO Parse the Accept header to get an prioritised list of thumbnail types.
        m_type = "image/png"
        max_timeout_ms = parse_integer(
            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
        )
        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)

        if self.dynamic_thumbnails:
            await self.thumbnail_provider.select_or_generate_local_thumbnail(
                request, media_id, width, height, method, m_type, max_timeout_ms, True
            )
        else:
            await self.thumbnail_provider.respond_local_thumbnail(
                request, media_id, width, height, method, m_type, max_timeout_ms, True
            )
        self.media_repo.mark_recently_accessed(None, media_id)


FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationSendServlet,
    FederationEventServlet,

@@ -858,5 +912,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationV1SendKnockServlet,
    FederationMakeKnockServlet,
    FederationAccountStatusServlet,
    FederationUnstableMediaDownloadServlet,
    FederationMediaDownloadServlet,
    FederationMediaThumbnailServlet,
)

@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
                    ratelimit=False,
                    require_consent=False,
                )

                # Mark the room forgotten too, because they won't be able to do this
                # for us. This may lead to the room being purged eventually.
                await self._room_member_handler.forget(user, room_id)
            except Exception:
                logger.exception(
                    "Failed to part user %r from room %r: ignoring and continuing",

@@ -201,7 +201,7 @@ class MessageHandler:

        if at_token:
            last_event_id = (
                await self.store.get_last_event_in_room_before_stream_ordering(
                await self.store.get_last_event_id_in_room_before_stream_ordering(
                    room_id,
                    end_token=at_token.room_key,
                )

@@ -642,6 +642,17 @@ class EventCreationHandler:
        """
        await self.auth_blocking.check_auth_blocking(requester=requester)

        if event_dict["type"] == EventTypes.Message:
            requester_suspended = await self.store.get_user_suspended_status(
                requester.user.to_string()
            )
            if requester_suspended:
                raise SynapseError(
                    403,
                    "Sending messages while account is suspended is not allowed.",
                    Codes.USER_ACCOUNT_SUSPENDED,
                )

        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
            room_version_id = event_dict["content"]["room_version"]
            maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
File diff suppressed because it is too large
@@ -979,89 +979,6 @@ class SyncHandler:
            bundled_aggregations=bundled_aggregations,
        )

    async def get_state_after_event(
        self,
        event_id: str,
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[str]:
        """
        Get the room state after the given event

        Args:
            event_id: event of interest
            state_filter: The state filter used to fetch state from the database.
            await_full_state: if `True`, will block if we do not yet have complete state
                at the event and `state_filter` is not satisfied by partial state.
                Defaults to `True`.
        """
        state_ids = await self._state_storage_controller.get_state_ids_for_event(
            event_id,
            state_filter=state_filter or StateFilter.all(),
            await_full_state=await_full_state,
        )

        # using get_metadata_for_events here (instead of get_event) sidesteps an issue
        # with redactions: if `event_id` is a redaction event, and we don't have the
        # original (possibly because it got purged), get_event will refuse to return
        # the redaction event, which isn't terribly helpful here.
        #
        # (To be fair, in that case we could assume it's *not* a state event, and
        # therefore we don't need to worry about it. But still, it seems cleaner just
        # to pull the metadata.)
        m = (await self.store.get_metadata_for_events([event_id]))[event_id]
        if m.state_key is not None and m.rejection_reason is None:
            state_ids = dict(state_ids)
            state_ids[(m.event_type, m.state_key)] = event_id

        return state_ids

    async def get_state_at(
        self,
        room_id: str,
        stream_position: StreamToken,
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[str]:
        """Get the room state at a particular stream position

        Args:
            room_id: room for which to get state
            stream_position: point at which to get state
            state_filter: The state filter used to fetch state from the database.
            await_full_state: if `True`, will block if we do not yet have complete state
                at the last event in the room before `stream_position` and
                `state_filter` is not satisfied by partial state. Defaults to `True`.
        """
        # FIXME: This gets the state at the latest event before the stream ordering,
        # which might not be the same as the "current state" of the room at the time
        # of the stream token if there were multiple forward extremities at the time.
        last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
            room_id,
            end_token=stream_position.room_key,
        )

        if last_event_id:
            state = await self.get_state_after_event(
                last_event_id,
                state_filter=state_filter or StateFilter.all(),
                await_full_state=await_full_state,
            )

        else:
            # no events in this room - so presumably no state
            state = {}

            # (erikj) This should be rarely hit, but we've had some reports that
            # we get more state down gappy syncs than we should, so let's add
            # some logging.
            logger.info(
                "Failed to find any events in room %s at %s",
                room_id,
                stream_position.room_key,
            )
        return state

    async def compute_summary(
        self,
        room_id: str,

@@ -1435,7 +1352,7 @@ class SyncHandler:
            await_full_state = True
            lazy_load_members = False

        state_at_timeline_end = await self.get_state_at(
        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
            room_id,
            stream_position=end_token,
            state_filter=state_filter,

@@ -1519,7 +1436,7 @@ class SyncHandler:
        # We need to make sure the first event in our batch points to the
        # last event in the previous batch.
        last_event_id_prev_batch = (
            await self.store.get_last_event_in_room_before_stream_ordering(
            await self.store.get_last_event_id_in_room_before_stream_ordering(
                room_id,
                end_token=since_token.room_key,
            )

@@ -1563,11 +1480,13 @@ class SyncHandler:
            else:
                # We can get here if the user has ignored the senders of all
                # the recent events.
                state_at_timeline_start = await self.get_state_at(
                    room_id,
                    stream_position=end_token,
                    state_filter=state_filter,
                    await_full_state=await_full_state,
                state_at_timeline_start = (
                    await self._state_storage_controller.get_state_ids_at(
                        room_id,
                        stream_position=end_token,
                        state_filter=state_filter,
                        await_full_state=await_full_state,
                    )
                )

        if batch.limited:

@@ -1585,14 +1504,14 @@ class SyncHandler:
            # about them).
            state_filter = StateFilter.all()

        state_at_previous_sync = await self.get_state_at(
        state_at_previous_sync = await self._state_storage_controller.get_state_ids_at(
            room_id,
            stream_position=since_token,
            state_filter=state_filter,
            await_full_state=await_full_state,
        )

        state_at_timeline_end = await self.get_state_at(
        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
            room_id,
            stream_position=end_token,
            state_filter=state_filter,

@@ -2591,7 +2510,7 @@ class SyncHandler:
                continue

            if room_id in sync_result_builder.joined_room_ids or has_join:
                old_state_ids = await self.get_state_at(
                old_state_ids = await self._state_storage_controller.get_state_ids_at(
                    room_id,
                    since_token,
                    state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),

@@ -2621,12 +2540,14 @@ class SyncHandler:
                    newly_left_rooms.append(room_id)
            else:
                if not old_state_ids:
                    old_state_ids = await self.get_state_at(
                        room_id,
                        since_token,
                        state_filter=StateFilter.from_types(
                            [(EventTypes.Member, user_id)]
                        ),
                    old_state_ids = (
                        await self._state_storage_controller.get_state_ids_at(
                            room_id,
                            since_token,
                            state_filter=StateFilter.from_types(
                                [(EventTypes.Member, user_id)]
                            ),
                        )
                    )
                    old_mem_ev_id = old_state_ids.get(
                        (EventTypes.Member, user_id), None
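The sync hunks are one mechanical refactor: the `SyncHandler`-private `get_state_at` helper (deleted above) gives way to a shared storage-controller method taking the same arguments, so every call site changes shape the same way. Schematically, with the new signature assumed from the call sites:

```python
# Before: a helper private to SyncHandler.
state = await self.get_state_at(
    room_id, stream_position=token, state_filter=f, await_full_state=True
)
# After: the shared controller method, same arguments, one implementation
# for all callers.
state = await self._state_storage_controller.get_state_ids_at(
    room_id, stream_position=token, state_filter=f, await_full_state=True
)
```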
@@ -35,6 +35,8 @@ from typing import (
    Union,
)

import attr
import multipart
import treq
from canonicaljson import encode_canonical_json
from netaddr import AddrFormatError, IPAddress, IPSet

@@ -1006,6 +1008,130 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
        self._maybe_fail()


@attr.s(auto_attribs=True, slots=True)
class MultipartResponse:
    """
    A small class to hold parsed values of a multipart response.
    """

    json: bytes = b"{}"
    length: Optional[int] = None
    content_type: Optional[bytes] = None
    disposition: Optional[bytes] = None
    url: Optional[bytes] = None


class _MultipartParserProtocol(protocol.Protocol):
    """
    Protocol to read and parse a MSC3916 multipart/mixed response
    """

    transport: Optional[ITCPTransport] = None

    def __init__(
        self,
        stream: ByteWriteable,
        deferred: defer.Deferred,
        boundary: str,
        max_length: Optional[int],
    ) -> None:
        self.stream = stream
        self.deferred = deferred
        self.boundary = boundary
        self.max_length = max_length
        self.parser = None
        self.multipart_response = MultipartResponse()
        self.has_redirect = False
        self.in_json = False
        self.json_done = False
        self.file_length = 0
        self.total_length = 0
        self.in_disposition = False
        self.in_content_type = False

    def dataReceived(self, incoming_data: bytes) -> None:
        if self.deferred.called:
            return

        # we don't have a parser yet, instantiate it
        if not self.parser:

            def on_header_field(data: bytes, start: int, end: int) -> None:
                if data[start:end] == b"Location":
                    self.has_redirect = True
                if data[start:end] == b"Content-Disposition":
                    self.in_disposition = True
                if data[start:end] == b"Content-Type":
                    self.in_content_type = True

            def on_header_value(data: bytes, start: int, end: int) -> None:
                # the first header should be content-type for application/json
                if not self.in_json and not self.json_done:
                    assert data[start:end] == b"application/json"
                    self.in_json = True
                elif self.has_redirect:
                    self.multipart_response.url = data[start:end]
                elif self.in_content_type:
                    self.multipart_response.content_type = data[start:end]
                    self.in_content_type = False
                elif self.in_disposition:
                    self.multipart_response.disposition = data[start:end]
                    self.in_disposition = False

            def on_part_data(data: bytes, start: int, end: int) -> None:
                # we've seen json header but haven't written the json data
                if self.in_json and not self.json_done:
                    self.multipart_response.json = data[start:end]
                    self.json_done = True
                # we have a redirect header rather than a file, and have already captured it
                elif self.has_redirect:
                    return
                # otherwise we are in the file part
                else:
                    logger.info("Writing multipart file data to stream")
                    try:
                        self.stream.write(data[start:end])
                    except Exception as e:
                        logger.warning(
                            f"Exception encountered writing file data to stream: {e}"
                        )
                        self.deferred.errback()
                    self.file_length += end - start

            callbacks = {
                "on_header_field": on_header_field,
                "on_header_value": on_header_value,
                "on_part_data": on_part_data,
            }
            self.parser = multipart.MultipartParser(self.boundary, callbacks)

        self.total_length += len(incoming_data)
        if self.max_length is not None and self.total_length >= self.max_length:
            self.deferred.errback(BodyExceededMaxSize())
            # Close the connection (forcefully) since all the data will get
            # discarded anyway.
            assert self.transport is not None
            self.transport.abortConnection()

        try:
            self.parser.write(incoming_data) # type: ignore[attr-defined]
        except Exception as e:
            logger.warning(f"Exception writing to multipart parser: {e}")
            self.deferred.errback()
            return

    def connectionLost(self, reason: Failure = connectionDone) -> None:
        # If the maximum size was already exceeded, there's nothing to do.
        if self.deferred.called:
            return

        if reason.check(ResponseDone):
            self.multipart_response.length = self.file_length
            self.deferred.callback(self.multipart_response)
        else:
            self.deferred.errback(reason)


class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
    """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""

@@ -1091,6 +1217,32 @@ def read_body_with_max_size(
    return d


def read_multipart_response(
    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
) -> "defer.Deferred[MultipartResponse]":
    """
    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
    the stream passed in and returning a deferred resolving to a MultipartResponse

    Args:
        response: The HTTP response to read from.
        stream: The file-object to write to.
        boundary: the multipart/mixed boundary string
        max_length: maximum allowable length of the response
    """
    d: defer.Deferred[MultipartResponse] = defer.Deferred()

    # If the Content-Length header gives a size larger than the maximum allowed
    # size, do not bother downloading the body.
    if max_length is not None and response.length != UNKNOWN_LENGTH:
        if response.length > max_length:
            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
            return d

    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
    return d


def encode_query_args(args: Optional[QueryParams]) -> bytes:
    """
    Encodes a map of query arguments to bytes which can be appended to a URL.
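`_MultipartParserProtocol` leans on the `multipart` package's push parser: you register callbacks, then feed bytes as they arrive. A standalone sketch of that API as used above; the boundary and body here are made up, and the callback signature matches the diff:

```python
import multipart

parts = []

def on_part_data(data: bytes, start: int, end: int) -> None:
    parts.append(data[start:end])

parser = multipart.MultipartParser("abc123", {"on_part_data": on_part_data})
parser.write(
    b"--abc123\r\nContent-Type: application/json\r\n\r\n{}\r\n"
    b"--abc123\r\nContent-Type: text/plain\r\n\r\nfile bytes\r\n"
    b"--abc123--\r\n"
)
# `parts` now holds the JSON part and the file part, delivered incrementally,
# which is what lets the protocol stream large files without buffering them.
```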
@@ -75,9 +75,11 @@ from synapse.http.client import (
    BlocklistingAgentWrapper,
    BodyExceededMaxSize,
    ByteWriteable,
    SimpleHttpClient,
    _make_scheduler,
    encode_query_args,
    read_body_with_max_size,
    read_multipart_response,
)
from synapse.http.connectproxyclient import BearerProxyCredentials
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent

@@ -466,6 +468,13 @@ class MatrixFederationHttpClient:

        self._sleeper = AwakenableSleeper(self.reactor)

        self._simple_http_client = SimpleHttpClient(
            hs,
            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
            use_proxy=True,
        )

    def wake_destination(self, destination: str) -> None:
        """Called when the remote server may have come back online."""

@@ -1553,6 +1562,189 @@ class MatrixFederationHttpClient:
        )
        return length, headers

    async def federation_get_file(
        self,
        destination: str,
        path: str,
        output_stream: BinaryIO,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
        max_size: int,
        args: Optional[QueryParams] = None,
        retry_on_dns_fail: bool = True,
        ignore_backoff: bool = False,
    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
        """GETs a file from a given homeserver over the federation /download endpoint
        Args:
            destination: The remote server to send the HTTP request to.
            path: The HTTP path to GET.
            output_stream: File to write the response body to.
            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
                requester IP
            ip_address: IP address of the requester
            max_size: maximum allowable size in bytes of the file
            args: Optional dictionary used to create the query string.
            ignore_backoff: true to ignore the historical backoff data
                and try the request anyway.

        Returns:
            Resolves to an (int, dict, bytes) tuple of
            the file length, a dict of the response headers, and the file json

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
            SynapseError: If the requested file exceeds ratelimits or the response from the
                remote server is not a multipart response
            AssertionError: if the resolved multipart response's length is None
        """
        request = MatrixFederationRequest(
            method="GET", destination=destination, path=path, query=args
        )

        # check for a minimum balance of 1MiB in ratelimiter before initiating request
        send_req, _ = await download_ratelimiter.can_do_action(
            requester=None, key=ip_address, n_actions=1048576, update=False
        )

        if not send_req:
            msg = "Requested file size exceeds ratelimits"
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)

        response = await self._send_request(
            request,
            retry_on_dns_fail=retry_on_dns_fail,
            ignore_backoff=ignore_backoff,
        )

        headers = dict(response.headers.getAllRawHeaders())

        expected_size = response.length
        # if we don't get an expected length then use the max length
        if expected_size == UNKNOWN_LENGTH:
            expected_size = max_size
            logger.debug(
                f"File size unknown, assuming file is max allowable size: {max_size}"
            )

        read_body, _ = await download_ratelimiter.can_do_action(
            requester=None,
            key=ip_address,
            n_actions=expected_size,
        )
        if not read_body:
            msg = "Requested file size exceeds ratelimits"
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)

        # this should be a multipart/mixed response with the boundary string in the header
        try:
            raw_content_type = headers.get(b"Content-Type")
            assert raw_content_type is not None
            content_type = raw_content_type[0].decode("UTF-8")
            content_type_parts = content_type.split("boundary=")
            boundary = content_type_parts[1]
        except Exception:
            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)

        try:
            # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
            deferred = read_multipart_response(
                response, output_stream, boundary, expected_size + 1
            )
            deferred.addTimeout(self.default_timeout_seconds, self.reactor)
        except BodyExceededMaxSize:
            msg = "Requested file is too large > %r bytes" % (expected_size,)
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
        except defer.TimeoutError as e:
            logger.warning(
                "{%s} [%s] Timed out reading response - %s %s",
                request.txn_id,
                request.destination,
                request.method,
                request.uri.decode("ascii"),
            )
            raise RequestSendFailed(e, can_retry=True) from e
        except ResponseFailed as e:
            logger.warning(
                "{%s} [%s] Failed to read response - %s %s",
                request.txn_id,
                request.destination,
                request.method,
                request.uri.decode("ascii"),
            )
            raise RequestSendFailed(e, can_retry=True) from e
        except Exception as e:
            logger.warning(
                "{%s} [%s] Error reading response: %s",
                request.txn_id,
                request.destination,
                e,
            )
            raise

        multipart_response = await make_deferred_yieldable(deferred)
        if not multipart_response.url:
            assert multipart_response.length is not None
            length = multipart_response.length
            headers[b"Content-Type"] = [multipart_response.content_type]
            headers[b"Content-Disposition"] = [multipart_response.disposition]

        # the response contained a redirect url to download the file from
        else:
            str_url = multipart_response.url.decode("utf-8")
            logger.info(
                "{%s} [%s] File download redirected, now downloading from: %s",
                request.txn_id,
                request.destination,
                str_url,
            )
            length, headers, _, _ = await self._simple_http_client.get_file(
                str_url, output_stream, expected_size
            )

        logger.info(
            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
            request.txn_id,
            request.destination,
            response.code,
            response.phrase.decode("ascii", errors="replace"),
            length,
            request.method,
            request.uri.decode("ascii"),
        )
        return length, headers, multipart_response.json


def _flatten_response_never_received(e: BaseException) -> str:
    if hasattr(e, "reasons"):
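The boundary extraction in `federation_get_file` is a plain string split on the Content-Type header; on a concrete (made-up) value:

```python
content_type = "multipart/mixed; boundary=6067d4698f8d40a0461794fc3306d0f5"
boundary = content_type.split("boundary=")[1]
assert boundary == "6067d4698f8d40a0461794fc3306d0f5"
# A header without "boundary=" raises IndexError, which the code above turns
# into a 502 "Remote response is malformed" SynapseError.
```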
@@ -119,14 +119,15 @@ def parse_integer(
        default: value to use if the parameter is absent, defaults to None.
        required: whether to raise a 400 SynapseError if the parameter is absent,
            defaults to False.
        negative: whether to allow negative integers, defaults to True.
        negative: whether to allow negative integers, defaults to False (disallowing
            negatives).
    Returns:
        An int value or the default.

    Raises:
        SynapseError: if the parameter is absent and required, if the
            parameter is present and not an integer, or if the
            parameter is illegitimate negative.
            parameter is illegitimately negative.
    """
    args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
    return parse_integer_from_args(args, name, default, required, negative)

@@ -164,7 +165,7 @@ def parse_integer_from_args(
    name: str,
    default: Optional[int] = None,
    required: bool = False,
    negative: bool = True,
    negative: bool = False,
) -> Optional[int]:
    """Parse an integer parameter from the request string

@@ -174,7 +175,8 @@ def parse_integer_from_args(
        default: value to use if the parameter is absent, defaults to None.
        required: whether to raise a 400 SynapseError if the parameter is absent,
            defaults to False.
        negative: whether to allow negative integers, defaults to True.
        negative: whether to allow negative integers, defaults to False (disallowing
            negatives).

    Returns:
        An int value or the default.

@@ -182,7 +184,7 @@ def parse_integer_from_args(
    Raises:
        SynapseError: if the parameter is absent and required, if the
            parameter is present and not an integer, or if the
            parameter is illegitimate negative.
            parameter is illegitimately negative.
    """
    name_bytes = name.encode("ascii")
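The flipped default in miniature, for a hypothetical request carrying `?limit=-5`:

```python
parse_integer(request, "limit")                 # now raises a 400 SynapseError
parse_integer(request, "limit", negative=True)  # returns -5 (the old default)
```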
@@ -46,10 +46,10 @@ from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock
from synapse.util.stringutils import is_ascii

if TYPE_CHECKING:
    from synapse.media.media_storage import MultipartResponder
    from synapse.storage.databases.main.media_repository import LocalMedia


@@ -221,6 +221,7 @@ def add_file_headers(
        # select private. don't bother setting Expires as all our
        # clients are smart enough to be happy with Cache-Control
        request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")

    if file_size is not None:
        request.setHeader(b"Content-Length", b"%d" % (file_size,))

@@ -275,16 +276,19 @@ def _can_encode_filename_as_token(x: str) -> bool:


async def respond_with_multipart_responder(
    clock: Clock,
    request: SynapseRequest,
    responder: "Optional[MultipartResponder]",
    responder: "Optional[Responder]",
    media_info: "LocalMedia",
) -> None:
    """
    Responds via a Multipart responder for the federation media `/download` requests
    Responds to requests originating from the federation media `/download` endpoint by
    streaming a multipart/mixed response

    Args:
        clock:
        request: the federation request to respond to
        responder: the Multipart responder which will send the response
        responder: the responder which will send the response
        media_info: metadata about the media item
    """
    if not responder:

@@ -299,15 +303,52 @@ async def respond_with_multipart_responder(
        )
        return

    if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
        disposition = "inline"
    else:
        disposition = "attachment"

    def _quote(x: str) -> str:
        return urllib.parse.quote(x.encode("utf-8"))

    if media_info.upload_name:
        if _can_encode_filename_as_token(media_info.upload_name):
            disposition = "%s; filename=%s" % (
                disposition,
                media_info.upload_name,
            )
        else:
            disposition = "%s; filename*=utf-8''%s" % (
                disposition,
                _quote(media_info.upload_name),
            )

    from synapse.media.media_storage import MultipartFileConsumer

    # note that currently the json_object is just {}, this will change when linked media
    # is implemented
    multipart_consumer = MultipartFileConsumer(
        clock,
        request,
        media_info.media_type,
        {},
        disposition,
        media_info.media_length,
    )

    logger.debug("Responding to media request with responder %s", responder)
    if media_info.media_length is not None:
        request.setHeader(b"Content-Length", b"%d" % (media_info.media_length,))
        content_length = multipart_consumer.content_length()
        assert content_length is not None
        request.setHeader(b"Content-Length", b"%d" % (content_length,))

    request.setHeader(
        b"Content-Type", b"multipart/mixed; boundary=%s" % responder.boundary
        b"Content-Type",
        b"multipart/mixed; boundary=%s" % multipart_consumer.boundary,
    )

    try:
        await responder.write_to_consumer(request)
        await responder.write_to_consumer(multipart_consumer)
    except Exception as e:
        # The majority of the time this will be due to the client having gone
        # away. Unfortunately, Twisted simply throws a generic exception at us
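The disposition logic above boils down to: inline for a small allow-list of media types, attachment otherwise, with RFC 2231 `filename*` encoding for names that are not plain HTTP tokens. A simplified standalone sketch; the token test here is deliberately rougher than `_can_encode_filename_as_token`, and the inline-types set is a stand-in for `INLINE_CONTENT_TYPES`:

```python
import urllib.parse

def disposition_for(media_type: str, upload_name: str, inline_types: set) -> str:
    base = (
        "inline"
        if media_type.lower().split(";", 1)[0] in inline_types
        else "attachment"
    )
    if upload_name.isascii() and " " not in upload_name:  # crude token test
        return f"{base}; filename={upload_name}"
    quoted = urllib.parse.quote(upload_name.encode("utf-8"))
    return f"{base}; filename*=utf-8''{quoted}"

print(disposition_for("image/png", "cat.png", {"image/png"}))
# inline; filename=cat.png
print(disposition_for("application/pdf", "résumé.pdf", {"image/png"}))
# attachment; filename*=utf-8''r%C3%A9sum%C3%A9.pdf
```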
@@ -58,7 +58,7 @@ from synapse.media._base import (
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage, MultipartResponder
from synapse.media.media_storage import MediaStorage
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer

@@ -462,13 +462,11 @@ class MediaRepository:

file_info = FileInfo(None, media_id, url_cache=bool(url_cache))

responder = await self.media_storage.fetch_media(
file_info, media_info, federation
)
responder = await self.media_storage.fetch_media(file_info)
if federation:
# this really should be a Multipart responder but just in case
assert isinstance(responder, MultipartResponder)
await respond_with_multipart_responder(request, responder, media_info)
await respond_with_multipart_responder(
self.clock, request, responder, media_info
)
else:
await respond_with_responder(
request, responder, media_type, media_length, upload_name

@@ -482,6 +480,7 @@ class MediaRepository:
name: Optional[str],
max_timeout_ms: int,
ip_address: str,
use_federation_endpoint: bool,
) -> None:
"""Respond to requests for remote media.

@@ -494,6 +493,8 @@ class MediaRepository:
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
ip_address: the IP address of the requester
use_federation_endpoint: whether to request the remote media over the new
federation `/download` endpoint

Returns:
Resolves once a response has successfully been written to request

@@ -524,6 +525,7 @@ class MediaRepository:
max_timeout_ms,
self.download_ratelimiter,
ip_address,
use_federation_endpoint,
)

# We deliberately stream the file outside the lock

@@ -540,7 +542,12 @@ class MediaRepository:
respond_404(request)

async def get_remote_media_info(
self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
self,
server_name: str,
media_id: str,
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
) -> RemoteMedia:
"""Gets the media info associated with the remote file, downloading
if necessary.

@@ -551,6 +558,8 @@ class MediaRepository:
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
ip_address: IP address of the requester
use_federation: if a download is necessary, whether to request the remote file
over the federation `/download` endpoint

Returns:
The media info of the file

@@ -571,6 +580,7 @@ class MediaRepository:
max_timeout_ms,
self.download_ratelimiter,
ip_address,
use_federation,
)

# Ensure we actually use the responder so that it releases resources

@@ -587,6 +597,7 @@ class MediaRepository:
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
use_federation_endpoint: bool,
) -> Tuple[Optional[Responder], RemoteMedia]:
"""Looks for media in local cache, if not there then attempt to
download from remote server.

@@ -600,6 +611,8 @@ class MediaRepository:
download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
requester IP.
ip_address: the IP address of the requester
use_federation_endpoint: whether to request the remote media over the new federation
/download endpoint

Returns:
A tuple of responder and the media info of the file.

@@ -631,9 +644,23 @@ class MediaRepository:
# Failed to find the file anywhere, lets download it.

try:
media_info = await self._download_remote_file(
server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
)
if not use_federation_endpoint:
media_info = await self._download_remote_file(
server_name,
media_id,
max_timeout_ms,
download_ratelimiter,
ip_address,
)
else:
media_info = await self._federation_download_remote_file(
server_name,
media_id,
max_timeout_ms,
download_ratelimiter,
ip_address,
)

except SynapseError:
raise
except Exception as e:

@@ -777,6 +804,129 @@ class MediaRepository:
quarantined_by=None,
)

async def _federation_download_remote_file(
self,
server_name: str,
media_id: str,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> RemoteMedia:
"""Attempt to download the remote file from the given server name.
Uses the given file_id as the local id and downloads the file over the federation
v1 download endpoint

Args:
server_name: Originating server
media_id: The media ID of the content (as defined by the
remote server). This is different than the file_id, which is
locally generated.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
requester IP
ip_address: the IP address of the requester

Returns:
The media info of the file.
"""

file_id = random_string(24)

file_info = FileInfo(server_name=server_name, file_id=file_id)

async with self.media_storage.store_into_file(file_info) as (f, fname):
try:
res = await self.client.federation_download_media(
server_name,
media_id,
output_stream=f,
max_size=self.max_upload_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
# if we had to fall back to the _matrix/media endpoint it will only return
# the headers and length, check the length of the tuple before unpacking
if len(res) == 3:
length, headers, json = res
else:
length, headers = res
except RequestSendFailed as e:
logger.warning(
"Request failed fetching remote media %s/%s: %r",
server_name,
media_id,
e,
)
raise SynapseError(502, "Failed to fetch remote media")

except HttpResponseException as e:
logger.warning(
"HTTP error fetching remote media %s/%s: %s",
server_name,
media_id,
e.response,
)
if e.code == twisted.web.http.NOT_FOUND:
raise e.to_synapse_error()
raise SynapseError(502, "Failed to fetch remote media")

except SynapseError:
logger.warning(
"Failed to fetch remote media %s/%s", server_name, media_id
)
raise
except NotRetryingDestination:
logger.warning("Not retrying destination %r", server_name)
raise SynapseError(502, "Failed to fetch remote media")
except Exception:
logger.exception(
"Failed to fetch remote media %s/%s", server_name, media_id
)
raise SynapseError(502, "Failed to fetch remote media")

if b"Content-Type" in headers:
media_type = headers[b"Content-Type"][0].decode("ascii")
else:
media_type = "application/octet-stream"
upload_name = get_filename_from_headers(headers)
time_now_ms = self.clock.time_msec()

# Multiple remote media download requests can race (when using
# multiple media repos), so this may throw a violation constraint
# exception. If it does we'll delete the newly downloaded file from
# disk (as we're in the ctx manager).
#
# However: we've already called `finish()` so we may have also
# written to the storage providers. This is preferable to the
# alternative where we call `finish()` *after* this, where we could
# end up having an entry in the DB but fail to write the files to
# the storage providers.
await self.store.store_cached_remote_media(
origin=server_name,
media_id=media_id,
media_type=media_type,
time_now_ms=time_now_ms,
upload_name=upload_name,
media_length=length,
filesystem_id=file_id,
)

logger.debug("Stored remote media in file %r", fname)

return RemoteMedia(
media_origin=server_name,
media_id=media_id,
media_type=media_type,
media_length=length,
upload_name=upload_name,
created_ts=time_now_ms,
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
)

def _get_thumbnail_requirements(
self, media_type: str
) -> Tuple[ThumbnailRequirement, ...]:
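The download above has to cope with two result shapes: the federation endpoint returns a JSON part as a third element, while the legacy `_matrix/media` fallback does not. The same defensive unpacking as a standalone sketch, with invented types:

# Mirrors the len(res) check above; not Synapse's code, just the pattern.
from typing import Any, Dict, Optional, Tuple

def unpack_download_result(
    res: Tuple[Any, ...],
) -> Tuple[int, Dict[bytes, Any], Optional[Dict[str, Any]]]:
    if len(res) == 3:
        length, headers, json_body = res
    else:
        length, headers = res
        json_body = None  # fell back to the legacy _matrix/media endpoint
    return length, headers, json_body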
@@ -39,30 +39,34 @@ from typing import (
Tuple,
Type,
Union,
cast,
)
from uuid import uuid4

import attr
from zope.interface import implementer

from twisted.internet import defer, interfaces
from twisted.internet import interfaces
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender

from synapse.api.errors import NotFoundError
from synapse.logging.context import defer_to_thread, make_deferred_yieldable
from synapse.logging.context import (
defer_to_thread,
make_deferred_yieldable,
run_in_background,
)
from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
from synapse.util import Clock
from synapse.util.file_consumer import BackgroundFileConsumer

from ..storage.databases.main.media_repository import LocalMedia
from ..types import JsonDict
from ._base import FileInfo, Responder
from .filepath import MediaFilePaths

if TYPE_CHECKING:
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.storage_provider import StorageProvider
from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -85,7 +89,7 @@ class MediaStorage:
hs: "HomeServer",
local_media_directory: str,
filepaths: MediaFilePaths,
storage_providers: Sequence["StorageProviderWrapper"],
storage_providers: Sequence["StorageProvider"],
):
self.hs = hs
self.reactor = hs.get_reactor()

@@ -181,23 +185,15 @@ class MediaStorage:

raise e from None

async def fetch_media(
self,
file_info: FileInfo,
media_info: Optional[LocalMedia] = None,
federation: bool = False,
) -> Optional[Responder]:
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
and configured storage providers.

Args:
file_info: Metadata about the media file
media_info: Metadata about the media item
federation: Whether this file is being fetched for a federation request

Returns:
If the file was found returns a Responder (a Multipart Responder if the requested
file is for the federation /download endpoint), otherwise None.
Returns a Responder if the file was found, otherwise None.
"""
paths = [self._file_info_to_path(file_info)]

@@ -217,19 +213,12 @@ class MediaStorage:
local_path = os.path.join(self.local_media_directory, path)
if os.path.exists(local_path):
logger.debug("responding with local file %s", local_path)
if federation:
assert media_info is not None
boundary = uuid4().hex.encode("ascii")
return MultipartResponder(
open(local_path, "rb"), media_info, boundary
)
else:
return FileResponder(open(local_path, "rb"))
return FileResponder(open(local_path, "rb"))
logger.debug("local file %s did not exist", local_path)

for provider in self.storage_providers:
for path in paths:
res: Any = await provider.fetch(path, file_info, media_info, federation)
res: Any = await provider.fetch(path, file_info)
if res:
logger.debug("Streaming %s from %s", path, provider)
return res

@@ -364,38 +353,6 @@ class FileResponder(Responder):
self.open_file.close()

class MultipartResponder(Responder):
"""Wraps an open file, formats the response according to MSC3916 and sends it to a
federation request.

Args:
open_file: A file like object to be streamed to the client,
is closed when finished streaming.
media_info: metadata about the media item
boundary: bytes to use for the multipart response boundary
"""

def __init__(self, open_file: IO, media_info: LocalMedia, boundary: bytes) -> None:
self.open_file = open_file
self.media_info = media_info
self.boundary = boundary

def write_to_consumer(self, consumer: IConsumer) -> Deferred:
return make_deferred_yieldable(
MultipartFileSender().beginFileTransfer(
self.open_file, consumer, self.media_info.media_type, {}, self.boundary
)
)

def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.open_file.close()

class SpamMediaException(NotFoundError):
"""The media was blocked by a spam checker, so we simply 404 the request (in
the same way as if it was quarantined).

@@ -431,105 +388,199 @@ class ReadableFileWrapper:
await self.clock.sleep(0)

@implementer(interfaces.IProducer)
class MultipartFileSender:
"""
A producer that sends the contents of a file to a federation request in the format
outlined in MSC3916 - a multipart/format-data response where the first field is a
JSON object and the second is the requested file.

This is a slight re-writing of twisted.protocols.basic.FileSender to achieve the format
outlined above.
@implementer(interfaces.IConsumer)
@implementer(interfaces.IPushProducer)
class MultipartFileConsumer:
"""Wraps a given consumer so that any data that gets written to it gets
converted to a multipart format.
"""

CHUNK_SIZE = 2**14

lastSent = ""
deferred: Optional[defer.Deferred] = None

def beginFileTransfer(
def __init__(
self,
file: IO,
consumer: IConsumer,
clock: Clock,
wrapped_consumer: interfaces.IConsumer,
file_content_type: str,
json_object: JsonDict,
boundary: bytes,
) -> Deferred:
"""
Begin transferring a file

Args:
file: The file object to read data from
consumer: The synapse request to write the data to
file_content_type: The content-type of the file
json_object: The JSON object to write to the first field of the response
boundary: bytes to be used as the multipart/form-data boundary

Returns: A deferred whose callback will be invoked when the file has
been completely written to the consumer. The last byte written to the
consumer is passed to the callback.
"""
self.file: Optional[IO] = file
self.consumer = consumer
disposition: str,
content_length: Optional[int],
) -> None:
self.clock = clock
self.wrapped_consumer = wrapped_consumer
self.json_field = json_object
self.json_field_written = False
self.content_type_written = False
self.file_headers_written = False
self.file_content_type = file_content_type
self.boundary = boundary
self.deferred: Deferred = defer.Deferred()
self.consumer.registerProducer(self, False)
# while it's not entirely clear why this assignment is necessary, it mirrors
# the behavior in FileSender.beginFileTransfer and thus is preserved here
deferred = self.deferred
return deferred
self.boundary = uuid4().hex.encode("ascii")

def resumeProducing(self) -> None:
# write the first field, which will always be a json field
# The producer that registered with us, and if it's a push or pull
# producer.
self.producer: Optional["interfaces.IProducer"] = None
self.streaming: Optional[bool] = None

# Whether the wrapped consumer has asked us to pause.
self.paused = False

self.length = content_length
self.disposition = disposition

### IConsumer APIs ###

def registerProducer(
self, producer: "interfaces.IProducer", streaming: bool
) -> None:
"""
Register to receive data from a producer.

This sets self to be a consumer for a producer. When this object runs
out of data (as when a send(2) call on a socket succeeds in moving the
last data from a userspace buffer into a kernelspace buffer), it will
ask the producer to resumeProducing().

For L{IPullProducer} providers, C{resumeProducing} will be called once
each time data is required.

For L{IPushProducer} providers, C{pauseProducing} will be called
whenever the write buffer fills up and C{resumeProducing} will only be
called when it empties. The consumer will only call C{resumeProducing}
to balance a previous C{pauseProducing} call; the producer is assumed
to start in an un-paused state.

@param streaming: C{True} if C{producer} provides L{IPushProducer},
C{False} if C{producer} provides L{IPullProducer}.

@raise RuntimeError: If a producer is already registered.
"""
self.producer = producer
self.streaming = streaming

self.wrapped_consumer.registerProducer(self, True)

# kick off producing if `self.producer` is not a streaming producer
if not streaming:
self.resumeProducing()

def unregisterProducer(self) -> None:
"""
Stop consuming data from a producer, without disconnecting.
"""
self.wrapped_consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
self.wrapped_consumer.unregisterProducer()
self.paused = True

def write(self, data: bytes) -> None:
"""
The producer will write data by calling this method.

The implementation must be non-blocking and perform whatever
buffering is necessary. If the producer has provided enough data
for now and it is a L{IPushProducer}, the consumer may call its
C{pauseProducing} method.
"""
if not self.json_field_written:
self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)

content_type = Header(b"Content-Type", b"application/json")
self.consumer.write(bytes(content_type) + CRLF)
self.wrapped_consumer.write(bytes(content_type) + CRLF)

json_field = json.dumps(self.json_field)
json_bytes = json_field.encode("utf-8")
self.consumer.write(json_bytes)
self.consumer.write(CRLF + b"--" + self.boundary + CRLF)
self.wrapped_consumer.write(CRLF + json_bytes)
self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)

self.json_field_written = True

chunk: Any = ""
if self.file:
# if we haven't written the content type yet, do so
if not self.content_type_written:
type = self.file_content_type.encode("utf-8")
content_type = Header(b"Content-Type", type)
self.consumer.write(bytes(content_type) + CRLF)
self.content_type_written = True
# if we haven't written the content type yet, do so
if not self.file_headers_written:
type = self.file_content_type.encode("utf-8")
content_type = Header(b"Content-Type", type)
self.wrapped_consumer.write(bytes(content_type) + CRLF)
disp_header = Header(b"Content-Disposition", self.disposition)
self.wrapped_consumer.write(bytes(disp_header) + CRLF + CRLF)
self.file_headers_written = True

chunk = self.file.read(self.CHUNK_SIZE)
self.wrapped_consumer.write(data)

if not chunk:
# we've reached the end of the file
self.consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
self.file = None
self.consumer.unregisterProducer()

if self.deferred:
self.deferred.callback(self.lastSent)
self.deferred = None
return

self.consumer.write(chunk)
self.lastSent = chunk[-1:]
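Putting the pieces together, a caller wires the consumer roughly as below, mirroring `respond_with_multipart_responder` earlier in this changeset; `clock`, `request`, `responder` and `media_info` are assumed context, and the content type and disposition values are invented:

# Hypothetical wiring of MultipartFileConsumer around an HTTP request.
multipart_consumer = MultipartFileConsumer(
    clock,                        # synapse.util.Clock
    request,                      # the wrapped IConsumer (the HTTP request)
    "image/png",                  # file_content_type (illustrative)
    {},                           # json_object: the leading metadata part
    "inline; filename=cat.png",   # disposition (illustrative)
    media_info.media_length,      # content_length, if known
)

content_length = multipart_consumer.content_length()
if content_length is not None:
    request.setHeader(b"Content-Length", b"%d" % (content_length,))
await responder.write_to_consumer(multipart_consumer)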
def pauseProducing(self) -> None:
pass
### IPushProducer APIs ###

def stopProducing(self) -> None:
if self.deferred:
self.deferred.errback(Exception("Consumer asked us to stop producing"))
self.deferred = None
"""
Stop producing data.

This tells a producer that its consumer has died, so it must stop
producing data for good.
"""
assert self.producer is not None
self.paused = True
self.producer.stopProducing()

def pauseProducing(self) -> None:
"""
Pause producing data.

Tells a producer that it has produced too much data to process for
the time being, and to stop until C{resumeProducing()} is called.
"""
assert self.producer is not None
self.paused = True

if self.streaming:
cast("interfaces.IPushProducer", self.producer).pauseProducing()
else:
self.paused = True

def resumeProducing(self) -> None:
"""
Resume producing data.

This tells a producer to re-add itself to the main loop and produce
more data for its consumer.
"""
assert self.producer is not None

if self.streaming:
cast("interfaces.IPushProducer", self.producer).resumeProducing()
else:
# If the producer is not a streaming producer we need to start
# repeatedly calling `resumeProducing` in a loop.
run_in_background(self._resumeProducingRepeatedly)

def content_length(self) -> Optional[int]:
"""
Calculate the content length of the multipart response
in bytes.
"""
if not self.length:
return None
# calculate length of json field and content-type, disposition headers
json_field = json.dumps(self.json_field)
json_bytes = json_field.encode("utf-8")
json_length = len(json_bytes)

type = self.file_content_type.encode("utf-8")
content_type = Header(b"Content-Type", type)
type_length = len(bytes(content_type))

disp = self.disposition.encode("utf-8")
disp_header = Header(b"Content-Disposition", disp)
disp_length = len(bytes(disp_header))

# 156 is the length of the elements that aren't variable, ie
# CRLFs and boundary strings, etc
self.length += json_length + type_length + disp_length + 156

return self.length
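The fixed 156-byte overhead can be checked by hand. Assuming the 32-character `uuid4().hex` boundary the class generates, and headers rendered as `Name: value`, the constant framing bytes add up as follows; this is a back-of-the-envelope check, not taken from the source:

# Fixed multipart framing overhead, assuming a 32-char hex boundary.
boundary_len = 32
open_delim = 2 + 2 + boundary_len + 2        # CRLF + "--" + boundary + CRLF = 38
close_delim = 2 + 2 + boundary_len + 2 + 2   # CRLF + "--" + boundary + "--" + CRLF = 40
json_ct_header = len(b"Content-Type: application/json")  # 30

fixed = (
    open_delim            # delimiter before the JSON part
    + json_ct_header + 2  # its Content-Type header plus trailing CRLF
    + 2                   # CRLF between the JSON headers and body
    + open_delim          # delimiter before the file part
    + 2                   # CRLF after the file Content-Type header
    + 2 + 2               # CRLF + CRLF after the Content-Disposition header
    + close_delim         # final closing delimiter
)
assert fixed == 156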
### Internal APIs. ###

async def _resumeProducingRepeatedly(self) -> None:
assert self.producer is not None
assert not self.streaming
producer = cast("interfaces.IPullProducer", self.producer)

self.paused = False
while not self.paused:
producer.resumeProducing()
await self.clock.sleep(0)

class Header:

@@ -24,16 +24,14 @@ import logging
import os
import shutil
from typing import TYPE_CHECKING, Callable, Optional
from uuid import uuid4

from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.logging.opentracing import start_active_span, trace_with_opname
from synapse.util.async_helpers import maybe_awaitable

from ..storage.databases.main.media_repository import LocalMedia
from ._base import FileInfo, Responder
from .media_storage import FileResponder, MultipartResponder
from .media_storage import FileResponder

logger = logging.getLogger(__name__)

@@ -57,21 +55,13 @@ class StorageProvider(metaclass=abc.ABCMeta):
"""

@abc.abstractmethod
async def fetch(
self,
path: str,
file_info: FileInfo,
media_info: Optional[LocalMedia] = None,
federation: bool = False,
) -> Optional[Responder]:
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""Attempt to fetch the file described by file_info and stream it
into writer.

Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
media_info: metadata of the media item
federation: Whether the requested media is for a federation request

Returns:
Returns a Responder if the provider has the file, otherwise returns None.

@@ -134,13 +124,7 @@ class StorageProviderWrapper(StorageProvider):
run_in_background(store)

@trace_with_opname("StorageProviderWrapper.fetch")
async def fetch(
self,
path: str,
file_info: FileInfo,
media_info: Optional[LocalMedia] = None,
federation: bool = False,
) -> Optional[Responder]:
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
if file_info.url_cache:
# Files in the URL preview cache definitely aren't stored here,
# so avoid any potentially slow I/O or network access.

@@ -148,9 +132,7 @@ class StorageProviderWrapper(StorageProvider):

# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
return await maybe_awaitable(
self.backend.fetch(path, file_info, media_info, federation)
)
return await maybe_awaitable(self.backend.fetch(path, file_info))

class FileStorageProviderBackend(StorageProvider):

@@ -190,23 +172,11 @@ class FileStorageProviderBackend(StorageProvider):
)

@trace_with_opname("FileStorageProviderBackend.fetch")
async def fetch(
self,
path: str,
file_info: FileInfo,
media_info: Optional[LocalMedia] = None,
federation: bool = False,
) -> Optional[Responder]:
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""See StorageProvider.fetch"""

backup_fname = os.path.join(self.base_directory, path)
if os.path.isfile(backup_fname):
if federation:
assert media_info is not None
boundary = uuid4().hex.encode("ascii")
return MultipartResponder(
open(backup_fname, "rb"), media_info, boundary
)
return FileResponder(open(backup_fname, "rb"))

return None

@@ -36,9 +36,11 @@ from synapse.media._base import (
ThumbnailInfo,
respond_404,
respond_with_file,
respond_with_multipart_responder,
respond_with_responder,
)
from synapse.media.media_storage import MediaStorage
from synapse.media.media_storage import FileResponder, MediaStorage
from synapse.storage.databases.main.media_repository import LocalMedia

if TYPE_CHECKING:
from synapse.media.media_repository import MediaRepository

@@ -271,6 +273,7 @@ class ThumbnailProvider:
method: str,
m_type: str,
max_timeout_ms: int,
for_federation: bool,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms

@@ -290,6 +293,8 @@ class ThumbnailProvider:
media_id,
url_cache=bool(media_info.url_cache),
server_name=None,
for_federation=for_federation,
media_info=media_info,
)

async def select_or_generate_local_thumbnail(

@@ -301,6 +306,7 @@ class ThumbnailProvider:
desired_method: str,
desired_type: str,
max_timeout_ms: int,
for_federation: bool,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms

@@ -326,10 +332,16 @@ class ThumbnailProvider:

responder = await self.media_storage.fetch_media(file_info)
if responder:
await respond_with_responder(
request, responder, info.type, info.length
)
return
if for_federation:
await respond_with_multipart_responder(
self.hs.get_clock(), request, responder, media_info
)
return
else:
await respond_with_responder(
request, responder, info.type, info.length
)
return

logger.debug("We don't have a thumbnail of that size. Generating")

@@ -344,7 +356,15 @@ class ThumbnailProvider:
)

if file_path:
await respond_with_file(request, desired_type, file_path)
if for_federation:
await respond_with_multipart_responder(
self.hs.get_clock(),
request,
FileResponder(open(file_path, "rb")),
media_info,
)
else:
await respond_with_file(request, desired_type, file_path)
else:
logger.warning("Failed to generate thumbnail")
raise SynapseError(400, "Failed to generate thumbnail.")

@@ -360,9 +380,10 @@ class ThumbnailProvider:
desired_type: str,
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
) -> None:
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address
server_name, media_id, max_timeout_ms, ip_address, use_federation
)
if not media_info:
respond_404(request)

@@ -424,12 +445,13 @@ class ThumbnailProvider:
m_type: str,
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
) -> None:
# TODO: Don't download the whole remote file
# We should proxy the thumbnail from the remote server instead of
# downloading the remote file and generating our own thumbnails.
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address
server_name, media_id, max_timeout_ms, ip_address, use_federation
)
if not media_info:
return

@@ -448,6 +470,7 @@ class ThumbnailProvider:
media_info.filesystem_id,
url_cache=False,
server_name=server_name,
for_federation=False,
)

async def _select_and_respond_with_thumbnail(

@@ -461,7 +484,9 @@ class ThumbnailProvider:
media_id: str,
file_id: str,
url_cache: bool,
for_federation: bool,
server_name: Optional[str] = None,
media_info: Optional[LocalMedia] = None,
) -> None:
"""
Respond to a request with an appropriate thumbnail from the previously generated thumbnails.

@@ -476,6 +501,8 @@ class ThumbnailProvider:
file_id: The ID of the media that a thumbnail is being requested for.
url_cache: True if this is from a URL cache.
server_name: The server name, if this is a remote thumbnail.
for_federation: whether the request is from the federation /thumbnail request
media_info: metadata about the media being requested.
"""
logger.debug(
"_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",

@@ -511,13 +538,20 @@ class ThumbnailProvider:

responder = await self.media_storage.fetch_media(file_info)
if responder:
await respond_with_responder(
request,
responder,
file_info.thumbnail.type,
file_info.thumbnail.length,
)
return
if for_federation:
assert media_info is not None
await respond_with_multipart_responder(
self.hs.get_clock(), request, responder, media_info
)
return
else:
await respond_with_responder(
request,
responder,
file_info.thumbnail.type,
file_info.thumbnail.length,
)
return

# If we can't find the thumbnail we regenerate it. This can happen
# if e.g. we've deleted the thumbnails but still have the original

@@ -558,12 +592,18 @@ class ThumbnailProvider:
)

responder = await self.media_storage.fetch_media(file_info)
await respond_with_responder(
request,
responder,
file_info.thumbnail.type,
file_info.thumbnail.length,
)
if for_federation:
assert media_info is not None
await respond_with_multipart_responder(
self.hs.get_clock(), request, responder, media_info
)
else:
await respond_with_responder(
request,
responder,
file_info.thumbnail.type,
file_info.thumbnail.length,
)
else:
# This might be because:
# 1. We can't create thumbnails for the given media (corrupted or

@@ -764,6 +764,13 @@ class Notifier:

async def wait_for_stream_token(self, stream_token: StreamToken) -> bool:
"""Wait for this worker to catch up with the given stream token."""
current_token = self.event_sources.get_current_token()
if stream_token.is_before_or_eq(current_token):
return True

# Work around a bug where older Synapse versions gave out tokens "from
# the future", i.e. that are ahead of the tokens persisted in the DB.
stream_token = await self.event_sources.bound_future_token(stream_token)

start = self.clock.time_msec()
while True:

@@ -28,7 +28,7 @@ import jinja2
from markupsafe import Markup
from prometheus_client import Counter

from synapse.api.constants import EventTypes, Membership, RoomTypes
from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
from synapse.api.errors import StoreError
from synapse.config.emailconfig import EmailSubjectConfig
from synapse.events import EventBase

@@ -716,7 +716,8 @@ class Mailer:
)
if (
create_event
and create_event.content.get("room_type") == RoomTypes.SPACE
and create_event.content.get(EventContentFields.ROOM_TYPE)
== RoomTypes.SPACE
):
return self.email_subjects.invite_from_person_to_space % {
"person": inviter_name,

@@ -114,13 +114,19 @@ class ReplicationDataHandler:
"""
all_room_ids: Set[str] = set()
if stream_name == DeviceListsStream.NAME:
if any(row.entity.startswith("@") and not row.is_signature for row in rows):
if any(not row.is_signature and not row.hosts_calculated for row in rows):
prev_token = self.store.get_device_stream_token()
all_room_ids = await self.store.get_all_device_list_changes(
prev_token, token
)
self.store.device_lists_in_rooms_have_changed(all_room_ids, token)

# If we're sending federation we need to update the device lists
# outbound pokes stream change cache with updated hosts.
if self.send_handler and any(row.hosts_calculated for row in rows):
hosts = await self.store.get_destinations_for_device(token)
self.store.device_lists_outbound_pokes_have_changed(hosts, token)

self.store.process_replication_rows(stream_name, instance_name, token, rows)
# NOTE: this must be called after process_replication_rows to ensure any
# cache invalidations are first handled before any stream ID advances.

@@ -433,12 +439,11 @@ class FederationSenderHandler:
# The entities are either user IDs (starting with '@') whose devices
# have changed, or remote servers that we need to tell about
# changes.
hosts = {
row.entity
for row in rows
if not row.entity.startswith("@") and not row.is_signature
}
await self.federation_sender.send_device_messages(hosts, immediate=False)
if any(row.hosts_calculated for row in rows):
hosts = await self.store.get_destinations_for_device(token)
await self.federation_sender.send_device_messages(
hosts, immediate=False
)

elif stream_name == ToDeviceStream.NAME:
# The to_device stream includes stuff to be pushed to both local

@@ -549,10 +549,14 @@ class DeviceListsStream(_StreamFromIdGen):

@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceListsStreamRow:
entity: str
user_id: str
# Indicates that a user has signed their own device with their user-signing key
is_signature: bool

# Indicates if this is a notification that we've calculated the hosts we
# need to send the update to.
hosts_calculated: bool

NAME = "device_lists"
ROW_TYPE = DeviceListsStreamRow
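For illustration, rows under the new schema look like this; the values are invented:

# Hypothetical rows using the new field set shown above.
row_user_update = DeviceListsStreamRow(
    user_id="@alice:example.org",  # was `entity` before this change
    is_signature=False,
    hosts_calculated=False,
)
row_outbound_poke = DeviceListsStreamRow(
    user_id="@alice:example.org",
    is_signature=False,
    hosts_calculated=True,  # hosts for the update have been computed
)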
@@ -594,13 +598,13 @@ class DeviceListsStream(_StreamFromIdGen):
upper_limit_token = min(upper_limit_token, signatures_to_token)

device_updates = [
(stream_id, (entity, False))
for stream_id, (entity,) in device_updates
(stream_id, (entity, False, hosts))
for stream_id, (entity, hosts) in device_updates
if stream_id <= upper_limit_token
]

signatures_updates = [
(stream_id, (entity, True))
(stream_id, (entity, True, False))
for stream_id, (entity,) in signatures_updates
if stream_id <= upper_limit_token
]

@@ -145,6 +145,10 @@ class ClientRestResource(JsonResource):
password_policy.register_servlets(hs, client_resource)
knock.register_servlets(hs, client_resource)
appservice_ping.register_servlets(hs, client_resource)
if hs.config.server.enable_media_repo:
from synapse.rest.client import media

media.register_servlets(hs, client_resource)

# moving to /_synapse/admin
if is_main_process:

@@ -101,6 +101,7 @@ from synapse.rest.admin.users import (
ResetPasswordRestServlet,
SearchUsersRestServlet,
ShadowBanRestServlet,
SuspendAccountRestServlet,
UserAdminServlet,
UserByExternalId,
UserByThreePid,

@@ -327,6 +328,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
ExperimentalFeaturesRestServlet(hs).register(http_server)
if hs.config.experimental.msc3823_account_suspension:
SuspendAccountRestServlet(hs).register(http_server)

def register_servlets_for_client_rest_resource(

@@ -31,7 +31,9 @@ from synapse.rest.admin import admin_patterns, assert_requester_is_admin
from synapse.types import JsonDict, UserID

if TYPE_CHECKING:
from synapse.server import HomeServer
from typing_extensions import assert_never

from synapse.server import HomeServer, HomeServerConfig

class ExperimentalFeature(str, Enum):

@@ -39,8 +41,16 @@ class ExperimentalFeature(str, Enum):
Currently supported per-user features
"""

MSC3026 = "msc3026"
MSC3881 = "msc3881"
MSC3575 = "msc3575"

def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
if self is ExperimentalFeature.MSC3881:
return config.experimental.msc3881_enabled
if self is ExperimentalFeature.MSC3575:
return config.experimental.msc3575_enabled

assert_never(self)
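The `assert_never` call buys exhaustiveness checking: adding an enum member without a matching branch turns the call into a type error. A standalone sketch with an invented enum:

# Exhaustiveness checking with assert_never; this toy enum is hypothetical.
from enum import Enum
from typing_extensions import assert_never

class Colour(Enum):
    RED = "red"
    BLUE = "blue"

def describe(c: Colour) -> str:
    if c is Colour.RED:
        return "warm"
    if c is Colour.BLUE:
        return "cool"
    # mypy narrows `c` to Never here; adding Colour.GREEN without a branch
    # makes this line a type error (and raises at runtime if ever reached).
    assert_never(c)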
class ExperimentalFeaturesRestServlet(RestServlet):

@@ -61,8 +61,8 @@ class ListDestinationsRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self._auth, request)

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)

destination = parse_string(request, "destination")

@@ -181,8 +181,8 @@ class DestinationMembershipRestServlet(RestServlet):
if not await self._store.is_destination_known(destination):
raise NotFoundError("Unknown destination")

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)

direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
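Dropping the explicit `negative=False` here only preserves behaviour if rejecting negatives became `parse_integer`'s default elsewhere in this changeset; under that assumption, the semantics are as sketched below (not Synapse's actual implementation):

# Sketch of the assumed new default: negative query-string integers are
# rejected unless explicitly allowed.
def parse_non_negative_int(raw: str, default: int) -> int:
    value = int(raw) if raw else default
    if value < 0:
        # Synapse would surface this as a 400 error to the client.
        raise ValueError("Query parameter must not be negative")
    return value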
@@ -311,8 +311,8 @@ class DeleteMediaByDateSize(RestServlet):
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)

before_ts = parse_integer(request, "before_ts", required=True, negative=False)
size_gt = parse_integer(request, "size_gt", default=0, negative=False)
before_ts = parse_integer(request, "before_ts", required=True)
size_gt = parse_integer(request, "size_gt", default=0)
keep_profiles = parse_boolean(request, "keep_profiles", default=True)

if before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds

@@ -377,8 +377,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)

# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.

@@ -421,8 +421,8 @@ class UserMediaRestServlet(RestServlet):
if user is None:
raise NotFoundError("Unknown user")

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)

# If neither `order_by` nor `dir` is set, set the default order
# to newest media is on top for backward compatibility.

@@ -35,6 +35,7 @@ from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
assert_params_in_dict,
parse_boolean,
parse_enum,
parse_integer,
parse_json,

@@ -242,13 +243,23 @@ class ListRoomRestServlet(RestServlet):
errcode=Codes.INVALID_PARAM,
)

public_rooms = parse_boolean(request, "public_rooms")
empty_rooms = parse_boolean(request, "empty_rooms")

direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
reverse_order = True if direction == Direction.BACKWARDS else False

# Return list of rooms according to parameters
rooms, total_rooms = await self.store.get_rooms_paginate(
start, limit, order_by, reverse_order, search_term
start,
limit,
order_by,
reverse_order,
search_term,
public_rooms,
empty_rooms,
)

response = {
# next_token should be opaque, so return a value the client can parse
"offset": start,

@@ -63,10 +63,10 @@ class UserMediaStatisticsRestServlet(RestServlet):
),
)

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
from_ts = parse_integer(request, "from_ts", default=0, negative=False)
until_ts = parse_integer(request, "until_ts", negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
from_ts = parse_integer(request, "from_ts", default=0)
until_ts = parse_integer(request, "until_ts")

if until_ts is not None:
if until_ts <= from_ts:

@@ -27,11 +27,13 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import attr

from synapse._pydantic_compat import HAS_PYDANTIC_V2
from synapse.api.constants import Direction, UserTypes
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_and_validate_json_object_from_request,
parse_boolean,
parse_enum,
parse_integer,

@@ -49,10 +51,17 @@ from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.registration import ExternalIDReuseException
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, JsonMapping, UserID
from synapse.types.rest import RequestBodyModel

if TYPE_CHECKING:
from synapse.server import HomeServer

if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1 import StrictBool
else:
from pydantic import StrictBool

logger = logging.getLogger(__name__)

@@ -90,8 +99,8 @@ class UsersRestServletV2(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)

start = parse_integer(request, "from", default=0, negative=False)
limit = parse_integer(request, "limit", default=100, negative=False)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)

user_id = parse_string(request, "user_id")
name = parse_string(request, "name", encoding="utf-8")

@@ -732,6 +741,36 @@ class DeactivateAccountRestServlet(RestServlet):
return HTTPStatus.OK, {"id_server_unbind_result": id_server_unbind_result}

class SuspendAccountRestServlet(RestServlet):
PATTERNS = admin_patterns("/suspend/(?P<target_user_id>[^/]*)$")

def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.is_mine = hs.is_mine
self.store = hs.get_datastores().main

class PutBody(RequestBodyModel):
suspend: StrictBool

async def on_PUT(
self, request: SynapseRequest, target_user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester)

if not self.is_mine(UserID.from_string(target_user_id)):
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only suspend local users")

if not await self.store.get_user_by_id(target_user_id):
raise NotFoundError("User not found")

body = parse_and_validate_json_object_from_request(request, self.PutBody)
suspend = body.suspend
await self.store.set_user_suspended_status(target_user_id, suspend)

return HTTPStatus.OK, {f"user_{target_user_id}_suspended": suspend}
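Driving the servlet looks roughly like this; the `/_synapse/admin/v1` prefix and all concrete values are assumptions layered on top of the `admin_patterns` helper, not spelled out in the diff:

# Hypothetical invocation of the suspension endpoint (illustrative values).
import requests  # any HTTP client works; requests is used for brevity

resp = requests.put(
    "https://synapse.example.org/_synapse/admin/v1/suspend/@alice:example.org",
    headers={"Authorization": "Bearer <admin access token>"},
    json={"suspend": True},
)
# Expected response body, per the servlet above:
# {"user_@alice:example.org_suspended": true}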
class AccountValidityRenewServlet(RestServlet):
PATTERNS = admin_patterns("/account_validity/validity$")

@@ -53,6 +53,7 @@ class KnockRoomAliasServlet(RestServlet):
super().__init__()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
self._support_via = hs.config.experimental.msc4156_enabled

async def on_POST(
self,

@@ -74,6 +75,13 @@ class KnockRoomAliasServlet(RestServlet):
remote_room_hosts = parse_strings_from_args(
args, "server_name", required=False
)
if self._support_via:
remote_room_hosts = parse_strings_from_args(
args,
"org.matrix.msc4156.via",
default=remote_room_hosts,
required=False,
)
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
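With MSC4156 support enabled, the unstable `via` query parameter overrides any `server_name` values. A hypothetical knock URL, with invented alias and servers (the `/v3/knock` path is an assumption about where this servlet is mounted):

# Building a knock URL that carries the MSC4156 via parameter.
from urllib.parse import quote, urlencode

path = "/_matrix/client/v3/knock/" + quote("#room:example.org", safe="")
query = urlencode([("org.matrix.msc4156.via", "one.example"),
                   ("org.matrix.msc4156.via", "two.example")])
url = path + "?" + query
# Falls back to ?server_name=... when msc4156_enabled is off.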
@@ -22,6 +22,7 @@

import logging
import re
from typing import Optional

from synapse.http.server import (
HttpServer,

@@ -46,7 +47,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
logger = logging.getLogger(__name__)

class UnstablePreviewURLServlet(RestServlet):
class PreviewURLServlet(RestServlet):
"""
Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API
for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix

@@ -64,9 +65,7 @@ class UnstablePreviewURLServlet(RestServlet):
* Matrix cannot be used to distribute the metadata between homeservers.
"""

PATTERNS = [
re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$")
]
PATTERNS = [re.compile(r"^/_matrix/client/v1/media/preview_url$")]

def __init__(
self,

@@ -94,10 +93,8 @@ class UnstablePreviewURLServlet(RestServlet):
respond_with_json_bytes(request, 200, og, send_cors=True)

class UnstableMediaConfigResource(RestServlet):
PATTERNS = [
re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$")
]
class MediaConfigResource(RestServlet):
PATTERNS = [re.compile(r"^/_matrix/client/v1/media/config$")]

def __init__(self, hs: "HomeServer"):
super().__init__()

@@ -111,10 +108,10 @@ class UnstableMediaConfigResource(RestServlet):
respond_with_json(request, 200, self.limits_dict, send_cors=True)

class UnstableThumbnailResource(RestServlet):
class ThumbnailResource(RestServlet):
PATTERNS = [
re.compile(
"/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
"/_matrix/client/v1/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
)
]

@@ -158,11 +155,25 @@ class UnstableThumbnailResource(RestServlet):
if self._is_mine_server_name(server_name):
if self.dynamic_thumbnails:
await self.thumbnailer.select_or_generate_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms
request,
media_id,
width,
height,
method,
m_type,
max_timeout_ms,
False,
)
else:
await self.thumbnailer.respond_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms
request,
media_id,
width,
height,
method,
m_type,
max_timeout_ms,
False,
)
self.media_repo.mark_recently_accessed(None, media_id)
else:

@@ -190,18 +201,79 @@ class UnstableThumbnailResource(RestServlet):
m_type,
max_timeout_ms,
ip_address,
True,
)
self.media_repo.mark_recently_accessed(server_name, media_id)

class DownloadResource(RestServlet):
PATTERNS = [
re.compile(
"/_matrix/client/v1/media/download/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)(/(?P<file_name>[^/]*))?$"
)
]

def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
super().__init__()
self.media_repo = media_repo
self._is_mine_server_name = hs.is_mine_server_name
self.auth = hs.get_auth()

async def on_GET(
self,
request: SynapseRequest,
server_name: str,
media_id: str,
file_name: Optional[str] = None,
) -> None:
# Validate the server name, raising if invalid
parse_and_validate_server_name(server_name)

await self.auth.get_user_by_req(request)

set_cors_headers(request)
set_corp_headers(request)
request.setHeader(
b"Content-Security-Policy",
b"sandbox;"
b" default-src 'none';"
b" script-src 'none';"
b" plugin-types application/pdf;"
b" style-src 'unsafe-inline';"
b" media-src 'self';"
b" object-src 'self';",
)
# Limited non-standard form of CSP for IE11
request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
request.setHeader(b"Referrer-Policy", b"no-referrer")
max_timeout_ms = parse_integer(
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
)
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)

if self._is_mine_server_name(server_name):
await self.media_repo.get_local_media(
request, media_id, file_name, max_timeout_ms
)
else:
ip_address = request.getClientAddress().host
await self.media_repo.get_remote_media(
request,
server_name,
media_id,
file_name,
max_timeout_ms,
ip_address,
True,
)
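Unlike the legacy `/_matrix/media` routes, this download endpoint authenticates the caller (note the `get_user_by_req` call). A hypothetical fetch against it, with invented homeserver, media ID and token:

# Illustrative authenticated media download; all values are examples.
import requests

resp = requests.get(
    "https://synapse.example.org/_matrix/client/v1/media/download/example.org/abc123",
    headers={"Authorization": "Bearer <access token>"},  # required
    params={"timeout_ms": "20000"},
)
resp.raise_for_status()
media_bytes = resp.content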
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.config.experimental.msc3916_authenticated_media_enabled:
media_repo = hs.get_media_repository()
if hs.config.media.url_preview_enabled:
UnstablePreviewURLServlet(
hs, media_repo, media_repo.media_storage
).register(http_server)
UnstableMediaConfigResource(hs).register(http_server)
UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register(
media_repo = hs.get_media_repository()
if hs.config.media.url_preview_enabled:
PreviewURLServlet(hs, media_repo, media_repo.media_storage).register(
http_server
)
MediaConfigResource(hs).register(http_server)
ThumbnailResource(hs, media_repo, media_repo.media_storage).register(http_server)
DownloadResource(hs, media_repo).register(http_server)

@@ -32,6 +32,7 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict

from ...api.errors import SynapseError
from ._base import client_patterns

if TYPE_CHECKING:

@@ -56,7 +57,22 @@ class NotificationsServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()

from_token = parse_string(request, "from", required=False)
# While this is intended to be "string" to clients, the 'from' token
# is actually based on a numeric ID. So it must parse to an int.
from_token_str = parse_string(request, "from", required=False)
if from_token_str is not None:
# Parse to an integer.
try:
from_token = int(from_token_str)
except ValueError:
# If it doesn't parse to an integer, then this cannot possibly be a valid
# pagination token, as we only hand out integers.
raise SynapseError(
400, 'Query parameter "from" contains unrecognised token'
)
else:
from_token = None

limit = parse_integer(request, "limit", default=50)
only = parse_string(request, "only", required=False)
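On the wire the token stays opaque, but it must parse as an integer; a non-numeric `from` now fails fast. Invented examples (the exact route prefix is an assumption):

# GET /_matrix/client/v3/notifications?from=12345&limit=50   -> 200
# GET /_matrix/client/v3/notifications?from=not-a-number     -> 400, "unrecognised token"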
|
||||
@@ -108,6 +108,19 @@ class ProfileDisplaynameRestServlet(RestServlet):
|
||||
|
||||
propagate = _read_propagate(self.hs, request)
|
||||
|
||||
requester_suspended = (
|
||||
await self.hs.get_datastores().main.get_user_suspended_status(
|
||||
requester.user.to_string()
|
||||
)
|
||||
)
|
||||
|
||||
if requester_suspended:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Updating displayname while account is suspended is not allowed.",
|
||||
Codes.USER_ACCOUNT_SUSPENDED,
|
||||
)
|
||||
|
||||
await self.profile_handler.set_displayname(
|
||||
user, requester, new_name, is_admin, propagate=propagate
|
||||
)
|
||||
@@ -167,6 +180,19 @@ class ProfileAvatarURLRestServlet(RestServlet):
|
||||
|
||||
propagate = _read_propagate(self.hs, request)
|
||||
|
||||
requester_suspended = (
|
||||
await self.hs.get_datastores().main.get_user_suspended_status(
|
||||
requester.user.to_string()
|
||||
)
|
||||
)
|
||||
|
||||
if requester_suspended:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Updating avatar URL while account is suspended is not allowed.",
|
||||
Codes.USER_ACCOUNT_SUSPENDED,
|
||||
)
|
||||
|
||||
await self.profile_handler.set_avatar_url(
|
||||
user, requester, new_avatar_url, is_admin, propagate=propagate
|
||||
)
|
||||
|
||||
@@ -32,6 +32,7 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.push import PusherConfigException
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.rest.client._base import client_patterns
from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource
from synapse.types import JsonDict
@@ -49,20 +50,22 @@ class PushersRestServlet(RestServlet):
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
        self._store = hs.get_datastores().main

    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request)
        user = requester.user
        user_id = requester.user.to_string()

        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(
            user.to_string()
        msc3881_enabled = await self._store.is_feature_enabled(
            user_id, ExperimentalFeature.MSC3881
        )

        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(user_id)

        pusher_dicts = [p.as_dict() for p in pushers]

        for pusher in pusher_dicts:
            if self._msc3881_enabled:
            if msc3881_enabled:
                pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
                pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
            del pusher["enabled"]
@@ -80,11 +83,15 @@ class PushersSetRestServlet(RestServlet):
        self.auth = hs.get_auth()
        self.notifier = hs.get_notifier()
        self.pusher_pool = self.hs.get_pusherpool()
        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
        self._store = hs.get_datastores().main

    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request)
        user = requester.user
        user_id = requester.user.to_string()

        msc3881_enabled = await self._store.is_feature_enabled(
            user_id, ExperimentalFeature.MSC3881
        )

        content = parse_json_object_from_request(request)

@@ -95,7 +102,7 @@ class PushersSetRestServlet(RestServlet):
            and content["kind"] is None
        ):
            await self.pusher_pool.remove_pusher(
                content["app_id"], content["pushkey"], user_id=user.to_string()
                content["app_id"], content["pushkey"], user_id=user_id
            )
            return 200, {}

@@ -120,19 +127,19 @@ class PushersSetRestServlet(RestServlet):
            append = content["append"]

        enabled = True
        if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content:
        if msc3881_enabled and "org.matrix.msc3881.enabled" in content:
            enabled = content["org.matrix.msc3881.enabled"]

        if not append:
            await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                app_id=content["app_id"],
                pushkey=content["pushkey"],
                not_user_id=user.to_string(),
                not_user_id=user_id,
            )

        try:
            await self.pusher_pool.add_or_update_pusher(
                user_id=user.to_string(),
                user_id=user_id,
                kind=content["kind"],
                app_id=content["app_id"],
                app_display_name=content["app_display_name"],
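To make the response shaping concrete, here is the visible part of the `on_GET` loop restated as a standalone, runnable function (a sketch; the full loop may strip further stable fields beyond what this hunk shows):

```python
from typing import Any, Dict, List

def render_pushers(
    pusher_dicts: List[Dict[str, Any]], msc3881_enabled: bool
) -> List[Dict[str, Any]]:
    """Sketch of the loop in PushersRestServlet.on_GET: with the per-user
    flag on, `enabled` and `device_id` are also exposed under their
    unstable MSC3881 prefixes; the stable `enabled` key is stripped
    either way.
    """
    for pusher in pusher_dicts:
        if msc3881_enabled:
            pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
            pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
        del pusher["enabled"]
    return pusher_dicts

# A pusher as it might come back from the datastore, reduced to the two
# fields the loop touches:
print(render_pushers([{"enabled": True, "device_id": "DEV1"}], True))
```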
@@ -417,6 +417,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
        super().__init__(hs)
        super(ResolveRoomIdMixin, self).__init__(hs)  # ensure the Mixin is set up
        self.auth = hs.get_auth()
        self._support_via = hs.config.experimental.msc4156_enabled

    def register(self, http_server: HttpServer) -> None:
        # /join/$room_identifier[/$txn_id]
@@ -435,6 +436,13 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
            # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
            args: Dict[bytes, List[bytes]] = request.args  # type: ignore
            remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
            if self._support_via:
                remote_room_hosts = parse_strings_from_args(
                    args,
                    "org.matrix.msc4156.via",
                    default=remote_room_hosts,
                    required=False,
                )
            room_id, remote_room_hosts = await self.resolve_room_id(
                room_identifier,
                remote_room_hosts,
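A standalone restatement of the `via` fallback (a sketch only; the servlet uses Synapse's `parse_strings_from_args` rather than this simplified decoder):

```python
from typing import Dict, List, Optional

def pick_route_hosts(
    args: Dict[bytes, List[bytes]], support_via: bool
) -> Optional[List[str]]:
    """Prefer the MSC4156 `org.matrix.msc4156.via` parameter when the
    experimental flag is on; otherwise keep the legacy `server_name`
    values, which also remain the default when `via` is absent.
    """
    def get_strings(name: bytes) -> Optional[List[str]]:
        values = args.get(name)
        if values is None:
            return None
        return [v.decode("ascii") for v in values]

    hosts = get_strings(b"server_name")
    if support_via:
        hosts = get_strings(b"org.matrix.msc4156.via") or hosts
    return hosts

# e.g. ?server_name=old.example&org.matrix.msc4156.via=new.example
print(pick_route_hosts(
    {b"server_name": [b"old.example"], b"org.matrix.msc4156.via": [b"new.example"]},
    support_via=True,
))  # -> ['new.example']
```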
@@ -502,7 +510,7 @@ class PublicRoomListRestServlet(RestServlet):
            if server:
                raise e

        limit: Optional[int] = parse_integer(request, "limit", 0, negative=False)
        limit: Optional[int] = parse_integer(request, "limit", 0)
        since_token = parse_string(request, "since")

        if limit == 0:
@@ -1112,6 +1120,20 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        requester_suspended = await self._store.get_user_suspended_status(
            requester.user.to_string()
        )

        if requester_suspended:
            event = await self._store.get_event(event_id, allow_none=True)
            if event:
                if event.sender != requester.user.to_string():
                    raise SynapseError(
                        403,
                        "You can only redact your own events while account is suspended.",
                        Codes.USER_ACCOUNT_SUSPENDED,
                    )

        # Ensure the redacts property in the content matches the one provided in
        # the URL.
        room_version = await self._store.get_room_version(room_id)
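The rule this hunk encodes, restated as a small predicate (a sketch, not code from the diff):

```python
from typing import Optional

def may_redact_while_suspended(
    requester_user_id: str, event_sender: Optional[str]
) -> bool:
    """A suspended user may redact an event only if they sent it.

    `event_sender` is None when the event could not be loaded, in which
    case the check above does not block the request.
    """
    if event_sender is None:
        return True
    return event_sender == requester_user_id
```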
@@ -1422,16 +1444,7 @@ class RoomHierarchyRestServlet(RestServlet):
        requester = await self._auth.get_user_by_req(request, allow_guest=True)

        max_depth = parse_integer(request, "max_depth")
        if max_depth is not None and max_depth < 0:
            raise SynapseError(
                400, "'max_depth' must be a non-negative integer", Codes.BAD_JSON
            )

        limit = parse_integer(request, "limit")
        if limit is not None and limit <= 0:
            raise SynapseError(
                400, "'limit' must be a positive integer", Codes.BAD_JSON
            )

        return 200, await self._room_summary_handler.get_room_hierarchy(
            requester,
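The removed lines inlined the validation of the two query parameters. The constraints themselves, restated standalone (a sketch — the replacement lines are not shown in this hunk):

```python
from typing import Optional, Tuple

def validate_hierarchy_params(
    max_depth: Optional[int], limit: Optional[int]
) -> Tuple[Optional[int], Optional[int]]:
    """`max_depth` must be non-negative and `limit` strictly positive,
    whenever they are supplied at all.
    """
    if max_depth is not None and max_depth < 0:
        raise ValueError("'max_depth' must be a non-negative integer")
    if limit is not None and limit <= 0:
        raise ValueError("'limit' must be a positive integer")
    return max_depth, limit
```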
@@ -53,6 +53,7 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.types import JsonDict, Requester, StreamToken
from synapse.types.rest.client import SlidingSyncBody
from synapse.util import json_decoder
@@ -673,7 +674,9 @@ class SlidingSyncE2eeRestServlet(RestServlet):
        )

    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        requester = await self.auth.get_user_by_req_experimental_feature(
            request, allow_guest=True, feature=ExperimentalFeature.MSC3575
        )
        user = requester.user
        device_id = requester.device_id

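`get_user_by_req_experimental_feature` gates the endpoint per user rather than per homeserver. Roughly, it amounts to the following — an illustrative re-implementation under assumptions, not the actual method body, and the exact error returned to non-enrolled users is a guess:

```python
from synapse.api.errors import Codes, SynapseError

async def get_user_by_req_gated(auth, store, request, feature, **kwargs):
    """Authenticate as usual, then reject the request unless the per-user
    experimental feature is enabled for the authenticated user.
    """
    requester = await auth.get_user_by_req(request, **kwargs)
    enabled = await store.is_feature_enabled(
        requester.user.to_string(), feature
    )
    if not enabled:
        # Pretend the endpoint does not exist for users outside the rollout.
        raise SynapseError(404, "Unrecognized request", errcode=Codes.UNRECOGNIZED)
    return requester
```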
@@ -761,7 +764,6 @@ class SlidingSyncRestServlet(RestServlet):
        "lists": {
            "foo-list": {
                "ranges": [ [0, 99] ],
                "sort": [ "by_notification_level", "by_recency", "by_name" ],
                "required_state": [
                    ["m.room.join_rules", ""],
                    ["m.room.history_visibility", ""],
@@ -771,7 +773,6 @@ class SlidingSyncRestServlet(RestServlet):
                "filters": {
                    "is_dm": true
                },
                "bump_event_types": [ "m.room.message", "m.room.encrypted" ],
            }
        },
        // Room Subscriptions API
@@ -779,10 +780,6 @@ class SlidingSyncRestServlet(RestServlet):
            "!sub1:bar": {
                "required_state": [ ["*","*"] ],
                "timeline_limit": 10,
                "include_old_rooms": {
                    "timeline_limit": 1,
                    "required_state": [ ["m.room.tombstone", ""], ["m.room.create", ""] ],
                }
            }
        },
        // Extensions API
@@ -791,7 +788,7 @@ class SlidingSyncRestServlet(RestServlet):

    Response JSON::
        {
            "next_pos": "s58_224_0_13_10_1_1_16_0_1",
            "pos": "s58_224_0_13_10_1_1_16_0_1",
            "lists": {
                "foo-list": {
                    "count": 1337,
@@ -830,7 +827,8 @@ class SlidingSyncRestServlet(RestServlet):
                    "joined_count": 41,
                    "invited_count": 1,
                    "notification_count": 1,
                    "highlight_count": 0
                    "highlight_count": 0,
                    "num_live": 2
                },
                // rooms from list
                "!foo:bar": {
@@ -855,7 +853,8 @@ class SlidingSyncRestServlet(RestServlet):
                    "joined_count": 4,
                    "invited_count": 0,
                    "notification_count": 54,
                    "highlight_count": 3
                    "highlight_count": 3,
                    "num_live": 1,
                },
                // ... 99 more items
            },
@@ -864,19 +863,23 @@ class SlidingSyncRestServlet(RestServlet):
    """

    PATTERNS = client_patterns(
        "/org.matrix.msc3575/sync$", releases=[], v1=False, unstable=True
        "/org.matrix.simplified_msc3575/sync$", releases=[], v1=False, unstable=True
    )

    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()
        self.filtering = hs.get_filtering()
        self.sliding_sync_handler = hs.get_sliding_sync_handler()
        self.event_serializer = hs.get_event_client_serializer()

    # TODO: Update this to `on_GET` once we figure out how we want to handle params
    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        requester = await self.auth.get_user_by_req_experimental_feature(
            request, allow_guest=True, feature=ExperimentalFeature.MSC3575
        )

        user = requester.user
        device_id = requester.device_id

@@ -920,22 +923,25 @@ class SlidingSyncRestServlet(RestServlet):
            logger.info("Client has disconnected; not serializing response.")
            return 200, {}

        response_content = await self.encode_response(sliding_sync_results)
        response_content = await self.encode_response(requester, sliding_sync_results)

        return 200, response_content

    # TODO: Is there a better way to encode things?
    async def encode_response(
        self,
        requester: Requester,
        sliding_sync_result: SlidingSyncResult,
    ) -> JsonDict:
        response: JsonDict = defaultdict(dict)

        response["next_pos"] = await sliding_sync_result.next_pos.to_string(self.store)
        response["pos"] = await sliding_sync_result.next_pos.to_string(self.store)
        serialized_lists = self.encode_lists(sliding_sync_result.lists)
        if serialized_lists:
            response["lists"] = serialized_lists
        response["rooms"] = {}  # TODO: sliding_sync_result.rooms
        response["rooms"] = await self.encode_rooms(
            requester, sliding_sync_result.rooms
        )
        response["extensions"] = {}  # TODO: sliding_sync_result.extensions

        return response
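The `next_pos` → `pos` rename tracks the simplified MSC3575 dialect, where the client echoes the last `pos` back on its next request. A sketch of the resulting client loop (hypothetical homeserver URL and token, `requests` assumed; passing `pos` as a query parameter is an assumption about the unstable API):

```python
import requests  # assumed available

HOMESERVER = "https://example.org"  # hypothetical
ACCESS_TOKEN = "syt_..."            # hypothetical

def sliding_sync_loop() -> None:
    """Long-poll the unstable simplified sliding sync endpoint, feeding
    each response's `pos` back into the next request."""
    pos = None
    while True:
        params = {"timeout": 30000}
        if pos is not None:
            params["pos"] = pos
        resp = requests.post(
            f"{HOMESERVER}/_matrix/client/unstable/org.matrix.simplified_msc3575/sync",
            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
            params=params,
            # A minimal single-list request body.
            json={"lists": {"foo-list": {"ranges": [[0, 9]]}}},
        )
        resp.raise_for_status()
        body = resp.json()
        pos = body["pos"]
        # ... process body["lists"] / body["rooms"] here ...
```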
@@ -961,10 +967,96 @@ class SlidingSyncRestServlet(RestServlet):

        return serialized_lists

    async def encode_rooms(
        self,
        requester: Requester,
        rooms: Dict[str, SlidingSyncResult.RoomResult],
    ) -> JsonDict:
        time_now = self.clock.time_msec()

        serialize_options = SerializeEventConfig(
            event_format=format_event_for_client_v2_without_room_id,
            requester=requester,
        )

        serialized_rooms: Dict[str, JsonDict] = {}
        for room_id, room_result in rooms.items():
            serialized_rooms[room_id] = {
                "bump_stamp": room_result.bump_stamp,
                "joined_count": room_result.joined_count,
                "invited_count": room_result.invited_count,
                "notification_count": room_result.notification_count,
                "highlight_count": room_result.highlight_count,
            }

            if room_result.name:
                serialized_rooms[room_id]["name"] = room_result.name

            if room_result.avatar:
                serialized_rooms[room_id]["avatar"] = room_result.avatar

            if room_result.heroes:
                serialized_rooms[room_id]["heroes"] = room_result.heroes

            # We should only include the `initial` key if it's `True` to save bandwidth.
            # The absence of this flag means `False`.
            if room_result.initial:
                serialized_rooms[room_id]["initial"] = room_result.initial

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.required_state is not None:
                serialized_required_state = (
                    await self.event_serializer.serialize_events(
                        room_result.required_state,
                        time_now,
                        config=serialize_options,
                    )
                )
                serialized_rooms[room_id]["required_state"] = serialized_required_state

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.timeline_events is not None:
                serialized_timeline = await self.event_serializer.serialize_events(
                    room_result.timeline_events,
                    time_now,
                    config=serialize_options,
                    bundle_aggregations=room_result.bundled_aggregations,
                )
                serialized_rooms[room_id]["timeline"] = serialized_timeline

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.limited is not None:
                serialized_rooms[room_id]["limited"] = room_result.limited

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.prev_batch is not None:
                serialized_rooms[room_id]["prev_batch"] = (
                    await room_result.prev_batch.to_string(self.store)
                )

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.num_live is not None:
                serialized_rooms[room_id]["num_live"] = room_result.num_live

            # Field should be absent on non-DM rooms
            if room_result.is_dm:
                serialized_rooms[room_id]["is_dm"] = room_result.is_dm

            # Stripped state only applies to invite/knock rooms
            if room_result.stripped_state is not None:
                # TODO: `knocked_state` but that isn't specced yet.
                #
                # TODO: Instead of adding `knocked_state`, it would be good to rename
                # this to `stripped_state` so it can be shared between invite and knock
                # rooms, see
                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1117629919
                serialized_rooms[room_id]["invite_state"] = room_result.stripped_state

        return serialized_rooms


def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    SyncRestServlet(hs).register(http_server)

    if hs.config.experimental.msc3575_enabled:
        SlidingSyncRestServlet(hs).register(http_server)
        SlidingSyncE2eeRestServlet(hs).register(http_server)
    SlidingSyncRestServlet(hs).register(http_server)
    SlidingSyncE2eeRestServlet(hs).register(http_server)
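Most per-room fields in `encode_rooms` are emitted only when set, so absence carries meaning (a missing `initial` means `False`). The pattern, distilled into a runnable sketch (not diff code):

```python
from typing import Any, Dict, Optional

def add_if_set(out: Dict[str, Any], key: str, value: Optional[Any]) -> None:
    """Leave optional or defaulted fields out of the JSON entirely rather
    than sending null/False, so their absence is the compact default."""
    if value is not None and value is not False:
        out[key] = value

room: Dict[str, Any] = {}
add_if_set(room, "name", None)      # omitted
add_if_set(room, "initial", False)  # omitted: absence means False
add_if_set(room, "num_live", 0)     # kept: 0 is meaningful
print(room)  # -> {'num_live': 0}
```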
@@ -25,11 +25,11 @@ import logging
import re
from typing import TYPE_CHECKING, Tuple

from twisted.web.server import Request

from synapse.api.constants import RoomCreationPreset
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.types import JsonDict

if TYPE_CHECKING:
@@ -45,6 +45,8 @@ class VersionsRestServlet(RestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.config = hs.config
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main

        # Calculate these once since they shouldn't change after start-up.
        self.e2ee_forced_public = (
@@ -60,7 +62,22 @@ class VersionsRestServlet(RestServlet):
            in self.config.room.encryption_enabled_by_default_for_room_presets
        )

    def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        msc3881_enabled = self.config.experimental.msc3881_enabled

        if self.auth.has_access_token(request):
            requester = await self.auth.get_user_by_req(
                request,
                allow_guest=True,
                allow_locked=True,
                allow_expired=True,
            )
            user_id = requester.user.to_string()

            msc3881_enabled = await self.store.is_feature_enabled(
                user_id, ExperimentalFeature.MSC3881
            )

        return (
            200,
            {
@@ -90,6 +107,7 @@ class VersionsRestServlet(RestServlet):
                    "v1.8",
                    "v1.9",
                    "v1.10",
                    "v1.11",
                ],
                # as per MSC1497:
                "unstable_features": {
@@ -124,7 +142,7 @@ class VersionsRestServlet(RestServlet):
                    # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
                    "org.matrix.msc3882": self.config.auth.login_via_existing_enabled,
                    # Adds support for remotely enabling/disabling pushers, as per MSC3881
                    "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
                    "org.matrix.msc3881": msc3881_enabled,
                    # Adds support for filtering /messages by event relation.
                    "org.matrix.msc3874": self.config.experimental.msc3874_enabled,
                    # Adds support for simple HTTP rendezvous as per MSC3886
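With this change, `unstable_features` can differ per caller: anonymous requests see the config default, while authenticated requests reflect the per-user flag. A client-side check (hypothetical homeserver URL, `requests` assumed):

```python
import requests  # assumed available

HOMESERVER = "https://example.org"  # hypothetical

def msc3881_advertised(access_token: str = "") -> bool:
    """Fetch /versions, optionally authenticated, and report whether
    org.matrix.msc3881 is advertised for this caller."""
    headers = {}
    if access_token:
        headers["Authorization"] = f"Bearer {access_token}"
    resp = requests.get(f"{HOMESERVER}/_matrix/client/versions", headers=headers)
    resp.raise_for_status()
    return bool(
        resp.json().get("unstable_features", {}).get("org.matrix.msc3881", False)
    )

print(msc3881_advertised())  # anonymous: homeserver config default
```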