
Compare commits


2 Commits

Author               SHA1        Message  Date
Olivier 'reivilibre  cde3f1d5f7  watci2   2025-03-21 16:15:08 +00:00
Olivier 'reivilibre  a544e574bb  watci    2025-03-21 16:09:35 +00:00
104 changed files with 394 additions and 1976 deletions


@@ -30,7 +30,7 @@ jobs:
run: docker buildx inspect
- name: Install Cosign
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2


@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}


@@ -24,7 +24,7 @@ jobs:
mdbook-version: '0.4.17'
- name: Setup python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
@@ -39,7 +39,7 @@ jobs:
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: book
path: book


@@ -64,7 +64,7 @@ jobs:
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
- name: Setup python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"


@@ -44,6 +44,6 @@ jobs:
- run: cargo fmt
continue-on-error: true
- uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
- uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0
with:
commit_message: "Attempt to fix linting"


@@ -86,7 +86,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -164,7 +164,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})


@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.x'
- run: pip install tomli


@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.x'
- id: set-distros
@@ -66,7 +66,7 @@ jobs:
install: true
- name: Set up docker layer caching
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -74,7 +74,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.x'
@@ -101,7 +101,7 @@ jobs:
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*
@@ -132,7 +132,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
@@ -165,7 +165,7 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl
@@ -177,7 +177,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.10'
@@ -186,7 +186,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: Sdist
path: dist/*.tar.gz
@@ -203,7 +203,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@b14cf4c92620c250e1c074ab0a5800e37df86765 # v4.2.0
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
@@ -213,7 +213,7 @@ jobs:
tar -cvJf debs.tar.xz debs
- name: Attach to release
# Pinned to work around https://github.com/softprops/action-gh-release/issues/445
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:


@@ -102,7 +102,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
@@ -112,7 +112,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
@@ -167,7 +167,7 @@ jobs:
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: |
.mypy_cache
@@ -188,11 +188,46 @@ jobs:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- run: |
env | while read -r line
do
var_name=$(echo "$line" | cut -d= -f1)
var_value=$(eval echo "\$$var_name")
if [ -z "$var_value" ]; then
echo "$var_name: EMPTY"
else
echo "$var_name: NOT EMPTY"
fi
done
- name: Cache Primes
id: cache-primes
uses: actions/cache@v4
with:
path: prime-numbers
key: ${{ runner.os }}-primes
- run: |
env | while read -r line
do
var_name=$(echo "$line" | cut -d= -f1)
var_value=$(eval echo "\$$var_name")
if [ -z "$var_value" ]; then
echo "$var_name: EMPTY"
else
echo "$var_name: NOT EMPTY"
fi
done
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
@@ -279,7 +314,7 @@ jobs:
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- run: "pip install rstcheck"
@@ -327,7 +362,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: "3.x"
- id: get-matrix
@@ -414,7 +449,7 @@ jobs:
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.9'
@@ -529,7 +564,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -627,7 +662,7 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps


@@ -11,7 +11,7 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
- uses: actions/add-to-project@f5473ace9aeee8b97717b281e26980aa5097023f # main (v1.0.2 + 10 commits)
id: add_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"


@@ -138,7 +138,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})


@@ -1,75 +1,3 @@
# Synapse 1.128.0 (2025-04-08)
No significant changes since 1.128.0rc1.
# Synapse 1.128.0rc1 (2025-04-01)
### Features
- Add an access token introspection cache to make Matrix Authentication Service integration ([MSC3861](https://github.com/matrix-org/matrix-doc/pull/3861)) more efficient. ([\#18231](https://github.com/element-hq/synapse/issues/18231))
- Add background job to clear unreferenced state groups. ([\#18254](https://github.com/element-hq/synapse/issues/18254))
- Hashes of media files are now tracked by Synapse. Media quarantines will now apply to all files with the same hash. ([\#18277](https://github.com/element-hq/synapse/issues/18277), [\#18302](https://github.com/element-hq/synapse/issues/18302), [\#18296](https://github.com/element-hq/synapse/issues/18296))
### Bugfixes
- Add index to sliding sync ([MSC4186](https://github.com/matrix-org/matrix-doc/pull/4186)) membership snapshot table, to fix a performance issue. ([\#18074](https://github.com/element-hq/synapse/issues/18074))
### Updates to the Docker image
- Specify the architecture of installed packages via an APT config option, which is more reliable than appending package names with `:{arch}`. ([\#18271](https://github.com/element-hq/synapse/issues/18271))
- Always specify base image debian versions with a build argument. ([\#18272](https://github.com/element-hq/synapse/issues/18272))
- Allow passing arguments to `start_for_complement.sh` (to be sent to `configure_workers_and_start.py`). ([\#18273](https://github.com/element-hq/synapse/issues/18273))
- Make some improvements to the `prefix-log` script in the workers image. ([\#18274](https://github.com/element-hq/synapse/issues/18274))
- Use `uv pip` to install `supervisor` in the worker image. ([\#18275](https://github.com/element-hq/synapse/issues/18275))
- Avoid needing to download & use `rsync` in a build layer. ([\#18287](https://github.com/element-hq/synapse/issues/18287))
### Improved Documentation
- Fix how to obtain access token and change naming from riot to element ([\#18225](https://github.com/element-hq/synapse/issues/18225))
- Correct a small typo in the SSO mapping providers documentation. ([\#18276](https://github.com/element-hq/synapse/issues/18276))
- Add docs for how to clear out the Poetry wheel cache. ([\#18283](https://github.com/element-hq/synapse/issues/18283))
### Internal Changes
- Add a column `participant` to `room_memberships` table. ([\#18068](https://github.com/element-hq/synapse/issues/18068))
- Update Poetry to 2.1.1, including updating the lock file version. ([\#18251](https://github.com/element-hq/synapse/issues/18251))
- Pin GitHub Actions dependencies by commit hash. ([\#18255](https://github.com/element-hq/synapse/issues/18255))
- Add DB delta to remove the old state group deletion job. ([\#18284](https://github.com/element-hq/synapse/issues/18284))
### Updates to locked dependencies
* Bump actions/add-to-project from f5473ace9aeee8b97717b281e26980aa5097023f to 280af8ae1f83a494cfad2cb10f02f6d13529caa9. ([\#18303](https://github.com/element-hq/synapse/issues/18303))
* Bump actions/cache from 4.2.2 to 4.2.3. ([\#18266](https://github.com/element-hq/synapse/issues/18266))
* Bump actions/download-artifact from 4.2.0 to 4.2.1. ([\#18268](https://github.com/element-hq/synapse/issues/18268))
* Bump actions/setup-python from 5.4.0 to 5.5.0. ([\#18298](https://github.com/element-hq/synapse/issues/18298))
* Bump actions/upload-artifact from 4.6.1 to 4.6.2. ([\#18304](https://github.com/element-hq/synapse/issues/18304))
* Bump authlib from 1.4.1 to 1.5.1. ([\#18306](https://github.com/element-hq/synapse/issues/18306))
* Bump dawidd6/action-download-artifact from 8 to 9. ([\#18204](https://github.com/element-hq/synapse/issues/18204))
* Bump jinja2 from 3.1.5 to 3.1.6. ([\#18223](https://github.com/element-hq/synapse/issues/18223))
* Bump log from 0.4.26 to 0.4.27. ([\#18267](https://github.com/element-hq/synapse/issues/18267))
* Bump phonenumbers from 8.13.50 to 9.0.2. ([\#18299](https://github.com/element-hq/synapse/issues/18299))
* Bump pygithub from 2.5.0 to 2.6.1. ([\#18243](https://github.com/element-hq/synapse/issues/18243))
* Bump pyo3-log from 0.12.1 to 0.12.2. ([\#18269](https://github.com/element-hq/synapse/issues/18269))
# Synapse 1.127.1 (2025-03-26)
## Security
- Fix [CVE-2025-30355](https://www.cve.org/CVERecord?id=CVE-2025-30355) / [GHSA-v56r-hwv5-mxg6](https://github.com/element-hq/synapse/security/advisories/GHSA-v56r-hwv5-mxg6). **High severity vulnerability affecting federation. The vulnerability has been exploited in the wild.**
# Synapse 1.127.0 (2025-03-25)
No significant changes since 1.127.0rc1.
# Synapse 1.127.0rc1 (2025-03-18)
### Features

Cargo.lock (generated)

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.98"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"
[[package]]
name = "arc-swap"
@@ -223,9 +223,9 @@ checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
[[package]]
name = "log"
version = "0.4.27"
version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"
[[package]]
name = "memchr"
@@ -316,9 +316,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.12.3"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
checksum = "be5bb22b77965a7b5394e9aae9897a0607b51df5167561ffc3b02643b4200bc7"
dependencies = [
"arc-swap",
"log",

changelog.d/18068.misc (new file)

@@ -0,0 +1 @@
Add a column `participant` to `room_memberships` table.

changelog.d/18074.bugfix (new file)

@@ -0,0 +1 @@
Add index to sliding sync membership snapshot table, to fix a performance issue.


@@ -1 +0,0 @@
Disable statement timeout during room purge.


@@ -1 +0,0 @@
Add `passthrough_authorization_parameters` in OIDC configuration to allow to pass parameters to the authorization grant URL.


@@ -1 +0,0 @@
Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.

changelog.d/18251.misc (new file)

@@ -0,0 +1 @@
Update Poetry to 2.1.1, including updating the lock file version.

changelog.d/18255.misc (new file)

@@ -0,0 +1 @@
Pin GitHub Actions dependencies by commit hash.


@@ -1 +0,0 @@
In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.


@@ -1 +0,0 @@
Optimize the build of the workers image.


@@ -1 +0,0 @@
In start_for_complement.sh, replace some external program calls with shell builtins.


@@ -1 +0,0 @@
Optimize the build of the complement-synapse image.


@@ -1 +0,0 @@
When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.


@@ -1 +0,0 @@
Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.


@@ -1 +0,0 @@
Fix `force_tracing_for_users` config when using delegated auth.


@@ -1 +0,0 @@
Fix the token introspection cache logging access tokens when MAS integration is in use.


@@ -1 +0,0 @@
Add cache to storage functions used to auth requests when using delegated auth.


@@ -1 +0,0 @@
Stop caching introspection failures when delegating auth to MAS.


@@ -1 +0,0 @@
Fix `ExternalIDReuse` exception after migrating to MAS on workers with a high traffic.


@@ -1 +0,0 @@
Fix minor performance regression caused by tracking of room participation. Regressed in v1.128.0.


@@ -1 +0,0 @@
Add support for handling `GET /devices/` on workers.


@@ -1 +0,0 @@
Allow `/rooms/` admin API to be run on workers.


@@ -1 +0,0 @@
Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.


@@ -1 +0,0 @@
Minor performance improvements to the notifier.


@@ -1 +0,0 @@
Slight performance increase when using the ratelimiter.


@@ -1 +0,0 @@
Allow client & media admin apis to coexist.

debian/changelog (vendored)

@@ -1,27 +1,8 @@
matrix-synapse-py3 (1.128.0) stable; urgency=medium
* New Synapse release 1.128.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Apr 2025 14:09:54 +0100
matrix-synapse-py3 (1.128.0~rc1) stable; urgency=medium
matrix-synapse-py3 (1.127.0~rc1+nmu1) UNRELEASED; urgency=medium
* Update Poetry to 2.1.1.
* New synapse release 1.128.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Apr 2025 14:35:33 +0000
matrix-synapse-py3 (1.127.1) stable; urgency=medium
* New Synapse release 1.127.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Mar 2025 21:07:31 +0000
matrix-synapse-py3 (1.127.0) stable; urgency=medium
* New Synapse release 1.127.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Mar 2025 12:04:15 +0000
-- Synapse Packaging team <packages@matrix.org> Wed, 19 Mar 2025 17:38:49 +0000
matrix-synapse-py3 (1.127.0~rc1) stable; urgency=medium


@@ -134,6 +134,7 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
apt-get install -y --no-install-recommends rsync && \
apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
curl \
gosu \
@@ -147,10 +148,14 @@ RUN \
for arch in arm64 amd64; do \
mkdir -p /tmp/debs-${arch} && \
cd /tmp/debs-${arch} && \
apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \
apt-get download $(sed "s/$/:${arch}/" /tmp/pkg-list); \
done
# Extract the debs for each architecture
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
# libraries to the right place, else the `COPY` won't work.
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
# already present in the runtime image.
RUN \
for arch in arm64 amd64; do \
mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \
@@ -160,6 +165,8 @@ RUN \
dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \
dpkg --extract $deb /install-${arch}; \
done; \
rsync -avr /install-${arch}/lib/ /install-${arch}/usr/lib; \
rm -rf /install-${arch}/lib /install-${arch}/lib64; \
done
@@ -176,14 +183,7 @@ LABEL org.opencontainers.image.documentation='https://github.com/element-hq/syna
LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
# libraries to the right place, else the `COPY` won't work.
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
# already present in the runtime image.
COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib
COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc
COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr
COPY --from=runtime-deps /install-${TARGETARCH}/var /var
COPY --from=runtime-deps /install-${TARGETARCH} /
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf


@@ -2,38 +2,18 @@
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=bookworm
ARG PYTHON_VERSION=3.12
# first of all, we create a base image with dependencies which we can copy into the
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
# Tell apt to keep downloaded package files, as we're using cache mounts.
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
FROM docker.io/library/debian:bookworm-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
nginx-light
RUN \
# remove default page
rm /etc/nginx/sites-enabled/default && \
# have nginx log to stderr/out
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stderr /var/log/nginx/error.log
# --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
# (mounted as --mount=type=cache) and the target directory.
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
RUN mkdir -p /uv/etc/supervisor/conf.d
redis-server nginx-light
# Similarly, a base to copy the redis server from.
#
@@ -41,21 +21,31 @@ FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
FROM docker.io/library/redis:7-bookworm AS redis_base
# now build the final image, based on the the regular Synapse docker image
FROM $FROM
# Copy over dependencies
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d
# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
COPY --from=deps_base /uv /
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
COPY --from=deps_base /var/log/nginx /var/log/nginx
# chown to allow non-root user to write to http-*-temp-path dirs
COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx
# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
@@ -74,4 +64,4 @@ FROM $FROM
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD ["/healthcheck.sh"]
CMD /bin/sh /healthcheck.sh


@@ -9,9 +9,6 @@
ARG SYNAPSE_VERSION=latest
# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=bookworm
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
@@ -23,9 +20,9 @@ FROM $FROM
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
@@ -58,4 +55,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD ["/healthcheck.sh"]
CMD /bin/sh /healthcheck.sh


@@ -5,12 +5,12 @@
set -e
echo "Complement Synapse launcher"
echo " Args: $*"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
function log {
d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
echo "$d $*"
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Set the server name of the homeserver
@@ -103,11 +103,12 @@ fi
# Note that both the key and certificate are in PEM format (not DER).
# First generate a configuration file to set up a Subject Alternative Name.
echo "\
cat > /conf/server.tls.conf <<EOF
.include /etc/ssl/openssl.cnf
[SAN]
subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
subjectAltName=DNS:${SERVER_NAME}
EOF
# Generate an RSA key
openssl genrsa -out /conf/server.tls.key 2048
@@ -122,12 +123,12 @@ openssl x509 -req -in /conf/server.tls.csr \
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
# Assert that we have a Subject Alternative Name in the certificate.
# (the test will exit with 1 here if there isn't a SAN in the certificate.)
[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py "$@"
exec /configure_workers_and_start.py


@@ -1,4 +1,4 @@
#!/usr/local/bin/python
#!/usr/bin/env python
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
@@ -376,11 +376,9 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
#
# We use append mode in case the files have already been written to by something else
# (for instance, as part of the instructions in a dockerfile).
exists = os.path.isfile(dst)
with open(dst, "a") as outfile:
# In case the existing file doesn't end with a newline
if exists:
outfile.write("\n")
outfile.write("\n")
outfile.write(rendered)
@@ -606,7 +604,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def parse_worker_types(
@@ -1000,7 +998,6 @@ def generate_worker_files(
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
os.chmod("/healthcheck.sh", 0o755)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"


@@ -10,9 +10,6 @@
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
# confusion due to to interleaving of the different processes.
prefixer() {
mawk -W interactive '{printf("%s | %s\n", ENVIRON["SUPERVISOR_PROCESS_NAME"], $0); fflush() }'
}
exec 1> >(prefixer)
exec 2> >(prefixer >&2)
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
exec "$@"


@@ -46,14 +46,6 @@ to any local media, and any locally-cached copies of remote media.
The media file itself (and any thumbnails) is not deleted from the server.
Since Synapse 1.128.0, hashes of uploaded media are tracked. If this media
is quarantined, Synapse will:
- Quarantine any media with a matching hash that has already been uploaded.
- Quarantine any future media.
- Quarantine any existing cached remote media.
- Quarantine any future remote media.
## Quarantining media by ID
This API quarantines a single piece of local or remote media.
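For context, a request to that endpoint looks roughly like the sketch below. This is illustrative only and not part of this diff; `<server_name>`, `<media_id>` and the access token are placeholders, and the exact path should be checked against the admin API reference.

```console
curl -X POST -H 'Authorization: Bearer <admin-access-token>' \
    'https://synapse.example.com/_synapse/admin/v1/media/quarantine/<server_name>/<media_id>'
```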


@@ -162,7 +162,7 @@ by a unique name, the current status (stored in JSON), and some dependency infor
* Whether the update requires a previous update to be complete.
* A rough ordering for which to complete updates.
A new background update needs to be added to the `background_updates` table:
A new background updates needs to be added to the `background_updates` table:
```sql
INSERT INTO background_updates (ordering, update_name, depends_on, progress_json) VALUES
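-- Illustrative values only, not part of this diff: a hypothetical update name,
-- an arbitrary ordering number, and an assumed earlier update it depends on.
  (7706, 'my_new_background_update', 'an_earlier_background_update', '{}');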


@@ -150,28 +150,6 @@ $ poetry shell
$ poetry install --extras all
```
If you want to go even further and remove the Poetry caches:
```shell
# Find your Poetry cache directory
# Docs: https://github.com/python-poetry/poetry/blob/main/docs/configuration.md#cache-directory
$ poetry config cache-dir
# Remove packages from all cached repositories
$ poetry cache clear --all .
# Go completely nuclear and clear out everything Poetry cache related
# including the wheel artifacts which is not covered by the above command
# (see https://github.com/python-poetry/poetry/issues/10304)
#
# This is necessary in order to rebuild or fetch new wheels. For example, if you update
# the `icu` library in on your system, you will need to rebuild the PyICU Python package
# in order to incorporate the correct dynamically linked library locations otherwise you
# will run into errors like: `ImportError: libicui18n.so.75: cannot open shared object file: No such file or directory`
$ rm -rf $(poetry config cache-dir)
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
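For example, a sketch of running Synapse's unit tests inside that virtualenv (an illustrative invocation, not part of this diff):

```shell
$ poetry run trial tests
```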


@@ -23,7 +23,6 @@ such as [Github][github-idp].
[auth0]: https://auth0.com/
[authentik]: https://goauthentik.io/
[lemonldap]: https://lemonldap-ng.org/
[pocket-id]: https://pocket-id.org/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
@@ -625,32 +624,6 @@ oidc_providers:
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
### Pocket ID
[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys.
1. Go to `OIDC Clients`
2. Click on `Add OIDC Client`
3. Add a name, for example `Synapse`
4. Add `"https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain
5. Click on `Save`
6. Note down your `Client ID` and `Client secret`, these will be used later
Synapse config:
```yaml
oidc_providers:
- idp_id: pocket_id
idp_name: Pocket ID
issuer: "https://auth.example.org/" # Replace with your domain
client_id: "your-client-id" # Replace with the "Client ID" you noted down before
client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Shibboleth with OIDC Plugin
[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.


@@ -10,7 +10,7 @@ As an example, a SSO service may return the email address
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SSO mapping providers come into play.
where SAML mapping providers come into play.
SSO mapping providers are currently supported for OpenID and SAML SSO
configurations. Please see the details below for how to implement your own.
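As a rough illustration (not part of this diff; it reuses the keys shown in the OIDC examples earlier in this dump, with `auth.example.org`, the client credentials and the templates as placeholders), the built-in Jinja-based mapping provider is configured per provider like so:

```yaml
oidc_providers:
  - idp_id: example
    idp_name: Example
    issuer: "https://auth.example.org/"
    client_id: "your-client-id"
    client_secret: "your-client-secret"
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
```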


@@ -160,7 +160,7 @@ Using the following curl command:
```console
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
```
`<access-token>` - can be obtained in element by looking in All settings, clicking Help & About and down the bottom is:
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
Access Token:\<click to reveal\>
`<room-alias>` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org


@@ -3672,9 +3672,6 @@ Options for each entry include:
* `additional_authorization_parameters`: String to string dictionary that will be passed as
additional parameters to the authorization grant URL.
* `passthrough_authorization_parameters`: List of parameters that will be passed through from the redirect endpoint
to the authorization grant URL.
* `allow_existing_users`: set to true to allow a user logging in via OIDC to
match a pre-existing account instead of failing. This could be used if
switching from password logins to OIDC. Defaults to false.
@@ -3801,7 +3798,6 @@ oidc_providers:
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
additional_authorization_parameters:
acr_values: 2fa
passthrough_authorization_parameters: ["login_hint"]
skip_verification: true
enable_registration: true
user_mapping_provider:
@@ -4018,7 +4014,7 @@ This option has a number of sub-options. They are as follows:
* `include_content`: Clients requesting push notifications can either have the body of
the message sent in the notification poke along with other details
like the sender, or just the event ID and room ID (`event_id_only`).
If clients choose to have the body sent, this option controls whether the
If clients choose the to have the body sent, this option controls whether the
notification request includes the content of the event (other details
like the sender are still included). If `event_id_only` is enabled, it
has no effect.
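As an illustrative sketch (not part of this diff), turning off message content in push notifications would look something like this in the homeserver configuration:

```yaml
push:
  include_content: false
```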


@@ -249,7 +249,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$
^/_matrix/client/(r0|v3|unstable)/notifications$
^/_synapse/admin/v1/rooms/
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -281,7 +280,6 @@ Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
^/_matrix/client/(api/v1|r0|v3|unstable)/devices/
# Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags

poetry.lock (generated)

@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
[[package]]
name = "authlib"
version = "1.5.1"
version = "1.4.1"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
files = [
{file = "authlib-1.5.1-py2.py3-none-any.whl", hash = "sha256:8408861cbd9b4ea2ff759b00b6f02fd7d81ac5a56d0b2b22c08606c6049aae11"},
{file = "authlib-1.5.1.tar.gz", hash = "sha256:5cbc85ecb0667312c1cdc2f9095680bb735883b123fb509fde1e65b1c5df972e"},
{file = "Authlib-1.4.1-py2.py3-none-any.whl", hash = "sha256:edc29c3f6a3e72cd9e9f45fff67fc663a2c364022eb0371c003f22d5405915c1"},
{file = "authlib-1.4.1.tar.gz", hash = "sha256:30ead9ea4993cdbab821dc6e01e818362f92da290c04c7f6a1940f86507a790d"},
]
[package.dependencies]
@@ -451,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -494,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -544,7 +544,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"all\" or extra == \"redis\""
markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
@@ -890,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -943,14 +943,14 @@ trio = ["async_generator ; python_version == \"3.6\"", "trio"]
[[package]]
name = "jinja2"
version = "3.1.6"
version = "3.1.5"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
{file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"},
{file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"},
{file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
{file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
]
[package.dependencies]
@@ -1028,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1044,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"url-preview\""
markers = "extra == \"url-preview\" or extra == \"all\""
files = [
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
@@ -1330,7 +1330,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1551,7 +1551,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1588,14 +1588,14 @@ dev = ["jinja2"]
[[package]]
name = "phonenumbers"
version = "9.0.2"
version = "8.13.50"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "phonenumbers-9.0.2-py2.py3-none-any.whl", hash = "sha256:dbcec6bdfdf3973f60b81dc0fcac3f7b1638f877ac42da4d7b46724ed413e2b9"},
{file = "phonenumbers-9.0.2.tar.gz", hash = "sha256:f590ee2b729bdd9873ca2d52989466add14c9953b48805c0aeb408348d4d6224"},
{file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"},
{file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"},
]
[[package]]
@@ -1709,7 +1709,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"all\" or extra == \"postgres\""
markers = "extra == \"postgres\" or extra == \"all\""
files = [
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
@@ -1730,7 +1730,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1746,7 +1746,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -1929,14 +1929,14 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygithub"
version = "2.6.1"
version = "2.5.0"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3"},
{file = "pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf"},
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
]
[package.dependencies]
@@ -1969,7 +1969,7 @@ description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"user-search\""
markers = "extra == \"user-search\" or extra == \"all\""
files = [
{file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
]
@@ -2018,7 +2018,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"cache-memory\""
markers = "extra == \"cache-memory\" or extra == \"all\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2053,19 +2053,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
version = "25.0.0"
version = "24.3.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
{file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
{file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
{file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
]
[package.dependencies]
cryptography = ">=41.0.5,<45"
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
@@ -2078,7 +2077,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2103,7 +2102,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2131,7 +2130,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
@@ -2495,7 +2494,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"sentry\""
markers = "extra == \"sentry\" or extra == \"all\""
files = [
{file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
{file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
@@ -2679,7 +2678,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2695,7 +2694,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2757,7 +2756,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
@@ -2891,7 +2890,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"redis\""
markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
{file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
@@ -2957,14 +2956,14 @@ files = [
[[package]]
name = "types-jsonschema"
version = "4.23.0.20241208"
version = "4.23.0.20240813"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
{file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
{file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
]
[package.dependencies]
@@ -3008,14 +3007,14 @@ files = [
[[package]]
name = "types-psycopg2"
version = "2.9.21.20250318"
version = "2.9.21.20250121"
description = "Typing stubs for psycopg2"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"},
{file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"},
{file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"},
{file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"},
]
[[package]]
@@ -3219,7 +3218,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},


@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.128.0"
version = "1.127.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"


@@ -192,11 +192,6 @@ APPEND_ONLY_TABLES = [
IGNORED_TABLES = {
# Porting the auto generated sequence in this table is non-trivial.
# None of the entries in this list are mandatory for Synapse to keep working.
# If state group disk space is an issue after the port, the
# `mark_unreferenced_state_groups_for_deletion_bg_update` background task can be run again.
"state_groups_pending_deletion",
# We don't port these tables, as they're a faff and we can regenerate
# them anyway.
"user_directory",
@@ -222,15 +217,6 @@ IGNORED_TABLES = {
}
# These background updates will not be applied upon creation of the postgres database.
IGNORED_BACKGROUND_UPDATES = {
# Reapplying this background update to the postgres database is unnecessary after
# already having waited for the SQLite database to complete all running background
# updates.
"mark_unreferenced_state_groups_for_deletion_bg_update",
}
# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error: Optional[str] = None
@@ -702,20 +688,6 @@ class Porter:
# 0 means off. 1 means full. 2 means incremental.
return autovacuum_setting != 0
async def remove_ignored_background_updates_from_database(self) -> None:
def _remove_delete_unreferenced_state_groups_bg_updates(
txn: LoggingTransaction,
) -> None:
txn.execute(
"DELETE FROM background_updates WHERE update_name = ANY(?)",
(list(IGNORED_BACKGROUND_UPDATES),),
)
await self.postgres_store.db_pool.runInteraction(
"remove_delete_unreferenced_state_groups_bg_updates",
_remove_delete_unreferenced_state_groups_bg_updates,
)
async def run(self) -> None:
"""Ports the SQLite database to a PostgreSQL database.
@@ -761,8 +733,6 @@ class Porter:
self.hs_config.database.get_single_database()
)
await self.remove_ignored_background_updates_from_database()
await self.run_background_updates_on_postgres()
self.progress.set_state("Creating port tables")


@@ -19,7 +19,6 @@
#
#
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from urllib.parse import urlencode
@@ -45,11 +44,9 @@ from synapse.api.errors import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.types import Requester, UserID, create_requester
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature
@@ -79,61 +76,6 @@ def scope_to_list(scope: str) -> List[str]:
return scope.strip().split(" ")
@dataclass
class IntrospectionResult:
_inner: IntrospectionToken
# when we retrieved this token,
# in milliseconds since the Unix epoch
retrieved_at_ms: int
def is_active(self, now_ms: int) -> bool:
if not self._inner.get("active"):
return False
expires_in = self._inner.get("expires_in")
if expires_in is None:
return True
if not isinstance(expires_in, int):
raise InvalidClientTokenError("token `expires_in` is not an int")
absolute_expiry_ms = expires_in * 1000 + self.retrieved_at_ms
return now_ms < absolute_expiry_ms
def get_scope_list(self) -> List[str]:
value = self._inner.get("scope")
if not isinstance(value, str):
return []
return scope_to_list(value)
def get_sub(self) -> Optional[str]:
value = self._inner.get("sub")
if not isinstance(value, str):
return None
return value
def get_username(self) -> Optional[str]:
value = self._inner.get("username")
if not isinstance(value, str):
return None
return value
def get_name(self) -> Optional[str]:
value = self._inner.get("name")
if not isinstance(value, str):
return None
return value
def get_device_id(self) -> Optional[str]:
value = self._inner.get("device_id")
if value is not None and not isinstance(value, str):
raise AuthError(
500,
"Invalid device ID in introspection result",
)
return value
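The expiry arithmetic in is_active() above converts the relative expires_in (in seconds) into an absolute timestamp measured against the moment the token was retrieved. The snippet below is a standalone sketch with concrete numbers, using only the standard library; the CachedIntrospection class and its field names are illustrative and are not Synapse's IntrospectionResult.

# Minimal sketch of the expiry arithmetic used by is_active() above (illustrative only).
from dataclasses import dataclass
from typing import Optional

@dataclass
class CachedIntrospection:
    active: bool
    expires_in: Optional[int]  # seconds, relative to retrieval time
    retrieved_at_ms: int       # milliseconds since the Unix epoch

    def is_active(self, now_ms: int) -> bool:
        if not self.active:
            return False
        if self.expires_in is None:
            return True
        # Convert the relative lifetime into an absolute expiry time.
        absolute_expiry_ms = self.expires_in * 1000 + self.retrieved_at_ms
        return now_ms < absolute_expiry_ms

# A token retrieved at t=1,000,000 ms with a 120 s lifetime is still active
# at t=1,060,000 ms but no longer active at t=1,200,000 ms.
token = CachedIntrospection(active=True, expires_in=120, retrieved_at_ms=1_000_000)
assert token.is_active(1_060_000)
assert not token.is_active(1_200_000)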
class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc]
"""An implementation of the private_key_jwt client auth method that includes a kid header.
@@ -178,34 +120,6 @@ class MSC3861DelegatedAuth(BaseAuth):
self._http_client = hs.get_proxied_http_client()
self._hostname = hs.hostname
self._admin_token: Callable[[], Optional[str]] = self._config.admin_token
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
# # Token Introspection Cache
# This remembers what users/devices are represented by which access tokens,
# in order to reduce overall system load:
# - on Synapse (as requests are relatively expensive)
# - on the network
# - on MAS
#
# Since there is no invalidation mechanism currently,
# the entries expire after 2 minutes.
# This does mean tokens can be treated as valid by Synapse
# for longer than they really are.
#
# Ideally, tokens should logically be invalidated in the following circumstances:
# - If a session logout happens.
# In this case, MAS will delete the device within Synapse
# anyway and this is good enough as an invalidation.
# - If the client refreshes their token in MAS.
# In this case, the device still exists and it's not the end of the world for
# the old access token to continue working for a short time.
self._introspection_cache: ResponseCache[str] = ResponseCache(
self._clock,
"token_introspection",
timeout_ms=120_000,
# don't log because the keys are access tokens
enable_logging=False,
)
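The comment block above describes a best-effort two-minute cache with no invalidation, where failed lookups are not remembered. As a rough, hedged sketch of that idea only — this is not the ResponseCache API; TinyTTLCache and the (should_cache, value) return shape are assumptions made for illustration:

import asyncio
import time
from typing import Awaitable, Callable, Dict, Tuple

class TinyTTLCache:
    """Cache async lookups for ttl_s seconds; callers decide what is cacheable."""

    def __init__(self, ttl_s: float = 120.0) -> None:
        self._ttl_s = ttl_s
        self._entries: Dict[str, Tuple[float, object]] = {}

    async def wrap(
        self, key: str, fn: Callable[[str], Awaitable[Tuple[bool, object]]]
    ) -> object:
        now = time.monotonic()
        hit = self._entries.get(key)
        if hit is not None and now - hit[0] < self._ttl_s:
            return hit[1]
        should_cache, value = await fn(key)
        # Only remember results the callback marked as cacheable, mirroring the
        # idea of flagging a response for caching once it is known to be valid.
        if should_cache:
            self._entries[key] = (now, value)
        return value

async def _demo() -> None:
    cache = TinyTTLCache(ttl_s=120.0)

    async def introspect(token: str) -> Tuple[bool, object]:
        # Pretend every token is active; a failed lookup would return (False, ...).
        return True, {"active": True, "sub": "user-for-" + token}

    first = await cache.wrap("secret-token", introspect)
    second = await cache.wrap("secret-token", introspect)  # served from the cache
    assert first == second

asyncio.run(_demo())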
self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata](
self._load_metadata
@@ -279,9 +193,7 @@ class MSC3861DelegatedAuth(BaseAuth):
metadata = await self._issuer_metadata.get()
return metadata.get("introspection_endpoint")
async def _introspect_token(
self, token: str, cache_context: ResponseCacheContext[str]
) -> IntrospectionResult:
async def _introspect_token(self, token: str) -> IntrospectionToken:
"""
Send a token to the introspection endpoint and returns the introspection response
@@ -297,8 +209,6 @@ class MSC3861DelegatedAuth(BaseAuth):
Returns:
The introspection response
"""
# By default, we shouldn't cache the result unless we know it's valid
cache_context.should_cache = False
introspection_endpoint = await self._introspection_endpoint()
raw_headers: Dict[str, str] = {
"Content-Type": "application/x-www-form-urlencoded",
@@ -356,11 +266,7 @@ class MSC3861DelegatedAuth(BaseAuth):
"The introspection endpoint returned an invalid JSON response."
)
# We had a valid response, so we can cache it
cache_context.should_cache = True
return IntrospectionResult(
IntrospectionToken(**resp), retrieved_at_ms=self._clock.time_msec()
)
return IntrospectionToken(**resp)
async def is_server_admin(self, requester: Requester) -> bool:
return "urn:synapse:admin:*" in requester.scope
@@ -371,55 +277,6 @@ class MSC3861DelegatedAuth(BaseAuth):
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
"""Get a registered user's ID.
Args:
request: An HTTP request with an access_token query parameter.
allow_guest: If False, will raise an AuthError if the user making the
request is a guest.
allow_expired: If True, allow the request through even if the account
is expired, or session token lifetime has ended. Note that
/login will deliver access tokens regardless of expiration.
Returns:
Resolves to the requester
Raises:
InvalidClientCredentialsError if no user by that token exists or the token
is invalid.
AuthError if access is denied for the user in the access token
"""
parent_span = active_span()
with start_active_span("get_user_by_req"):
requester = await self._wrapped_get_user_by_req(
request, allow_guest, allow_expired, allow_locked
)
if parent_span:
if requester.authenticated_entity in self._force_tracing_for_users:
# request tracing is enabled for this user, so we need to force it
# tracing on for the parent span (which will be the servlet span).
#
# It's too late for the get_user_by_req span to inherit the setting,
# so we also force it on for that.
force_tracing()
force_tracing(parent_span)
parent_span.set_tag(
"authenticated_entity", requester.authenticated_entity
)
parent_span.set_tag("user_id", requester.user.to_string())
if requester.device_id is not None:
parent_span.set_tag("device_id", requester.device_id)
if requester.app_service is not None:
parent_span.set_tag("appservice_id", requester.app_service.id)
return requester
async def _wrapped_get_user_by_req(
self,
request: SynapseRequest,
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
access_token = self.get_access_token_from_request(request)
@@ -487,9 +344,7 @@ class MSC3861DelegatedAuth(BaseAuth):
)
try:
introspection_result = await self._introspection_cache.wrap(
token, self._introspect_token, token, cache_context=True
)
introspection_result = await self._introspect_token(token)
except Exception:
logger.exception("Failed to introspect token")
raise SynapseError(503, "Unable to introspect the access token")
@@ -498,11 +353,11 @@ class MSC3861DelegatedAuth(BaseAuth):
# TODO: introspection verification should be more extensive, especially:
# - verify the audience
if not introspection_result.is_active(self._clock.time_msec()):
if not introspection_result.get("active"):
raise InvalidClientTokenError("Token is not active")
# Let's look at the scope
scope: List[str] = introspection_result.get_scope_list()
scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
# Determine type of user based on presence of particular scopes
has_user_scope = SCOPE_MATRIX_API in scope
@@ -512,7 +367,7 @@ class MSC3861DelegatedAuth(BaseAuth):
raise InvalidClientTokenError("No scope in token granting user rights")
# Match via the sub claim
sub: Optional[str] = introspection_result.get_sub()
sub: Optional[str] = introspection_result.get("sub")
if sub is None:
raise InvalidClientTokenError(
"Invalid sub claim in the introspection result"
@@ -526,7 +381,7 @@ class MSC3861DelegatedAuth(BaseAuth):
# or the external_id was never recorded
# TODO: claim mapping should be configurable
username: Optional[str] = introspection_result.get_username()
username: Optional[str] = introspection_result.get("username")
if username is None or not isinstance(username, str):
raise AuthError(
500,
@@ -544,7 +399,7 @@ class MSC3861DelegatedAuth(BaseAuth):
# TODO: claim mapping should be configurable
# If present, use the name claim as the displayname
name: Optional[str] = introspection_result.get_name()
name: Optional[str] = introspection_result.get("name")
await self.store.register_user(
user_id=user_id.to_string(), create_profile_with_displayname=name
@@ -559,8 +414,15 @@ class MSC3861DelegatedAuth(BaseAuth):
# MAS 0.15+ will give us the device ID as an explicit value for compatibility sessions
# If present, we get it from here; if not, we get it from the scope
device_id = introspection_result.get_device_id()
if device_id is None:
device_id = introspection_result.get("device_id")
if device_id is not None:
# We got the device ID explicitly, just sanity check that it's a string
if not isinstance(device_id, str):
raise AuthError(
500,
"Invalid device ID in introspection result",
)
else:
# Find device_ids in scope
# We only allow a single device_id in the scope, so we find them all in the
# scope list, and raise if there are more than one. The OIDC server should be


@@ -29,13 +29,8 @@ from typing import Final
# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536
# Max/min size of ints in canonical JSON
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
# the "depth" field on events is limited to the same as what
# canonicaljson accepts
MAX_DEPTH = CANONICALJSON_MAX_INT
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255
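For context, 2**53 - 1 is the largest bound for which every integer in the range survives a round-trip through an IEEE 754 double, which is the constraint canonical JSON inherits. A small illustration in plain Python, mirroring (not importing) the constants above:

# Illustration of the canonical-JSON integer bounds referenced above (not Synapse code).
CANONICALJSON_MAX_INT = (2**53) - 1   # 9007199254740991
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

def in_canonicaljson_range(value: int) -> bool:
    """Return True if the integer is within the canonical-JSON bounds."""
    return CANONICALJSON_MIN_INT <= value <= CANONICALJSON_MAX_INT

assert in_canonicaljson_range(2**53 - 1)
assert not in_canonicaljson_range(2**53)      # just above the canonical-JSON bound
assert not in_canonicaljson_range(2**63 - 1)  # the old MAX_DEPTH value is far outside the range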


@@ -20,7 +20,8 @@
#
#
from typing import Dict, Hashable, Optional, Tuple
from collections import OrderedDict
from typing import Hashable, Optional, Tuple
from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RatelimitSettings
@@ -79,14 +80,12 @@ class Ratelimiter:
self.store = store
self._limiter_name = cfg.key
# A dictionary representing the token buckets tracked by this rate
# An ordered dictionary representing the token buckets tracked by this rate
# limiter. Each entry maps a key of arbitrary type to a tuple representing:
# * The number of tokens currently in the bucket,
# * The time point when the bucket was last completely empty, and
# * The rate_hz (leak rate) of this particular bucket.
self.actions: Dict[Hashable, Tuple[float, float, float]] = {}
self.clock.looping_call(self._prune_message_counts, 60 * 1000)
self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()
def _get_key(
self, requester: Optional[Requester], key: Optional[Hashable]
@@ -170,6 +169,9 @@ class Ratelimiter:
rate_hz = rate_hz if rate_hz is not None else self.rate_hz
burst_count = burst_count if burst_count is not None else self.burst_count
# Remove any expired entries
self._prune_message_counts(time_now_s)
# Check if there is an existing count entry for this key
action_count, time_start, _ = self._get_action_counts(key, time_now_s)
@@ -244,12 +246,13 @@ class Ratelimiter:
action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
self.actions[key] = (action_count + n_actions, time_start, rate_hz)
def _prune_message_counts(self) -> None:
def _prune_message_counts(self, time_now_s: float) -> None:
"""Remove message count entries that have not exceeded their defined
rate_hz limit
"""
time_now_s = self.clock.time()
Args:
time_now_s: The current time
"""
# We create a copy of the key list here as the dictionary is modified during
# the loop
for key in list(self.actions.keys()):
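The bucket bookkeeping described in the comments above — each key mapping to a token count, the time the bucket was last empty, and a leak rate, with expired entries pruned as part of the check — can be sketched in a few lines. The TinyRatelimiter below is an illustrative simplification, not the Synapse Ratelimiter API; it drops the per-bucket rate_hz override and all async/database handling.

import time
from collections import OrderedDict
from typing import Hashable, Optional, Tuple

class TinyRatelimiter:
    """Illustrative leaky-bucket limiter: burst_count actions, refilled at rate_hz per second."""

    def __init__(self, rate_hz: float, burst_count: int) -> None:
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        # key -> (actions performed, time the bucket was last empty)
        self.actions: "OrderedDict[Hashable, Tuple[float, float]]" = OrderedDict()

    def _prune(self, now: float) -> None:
        # Drop buckets whose tokens have fully leaked away.
        for key in list(self.actions.keys()):
            count, started = self.actions[key]
            if count - (now - started) * self.rate_hz <= 0:
                del self.actions[key]

    def can_do_action(self, key: Hashable, now: Optional[float] = None) -> bool:
        now = time.monotonic() if now is None else now
        self._prune(now)
        count, started = self.actions.get(key, (0.0, now))
        # Leak tokens accrued since the bucket was last empty.
        count = max(0.0, count - (now - started) * self.rate_hz)
        if count >= self.burst_count:
            return False
        self.actions[key] = (count + 1, started if count > 0 else now)
        return True

limiter = TinyRatelimiter(rate_hz=1.0, burst_count=2)
assert limiter.can_do_action("user", now=0.0)
assert limiter.can_do_action("user", now=0.0)
assert not limiter.can_do_action("user", now=0.0)   # burst exhausted
assert limiter.can_do_action("user", now=5.0)       # tokens have leaked away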


@@ -21,7 +21,7 @@
#
import logging
import sys
from typing import Dict, List, cast
from typing import Dict, List
from twisted.web.resource import Resource
@@ -51,8 +51,8 @@ from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.rest import ClientRestResource, admin
from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -190,11 +190,7 @@ class GenericWorkerServer(HomeServer):
resources.update(build_synapse_client_resource_tree(self))
resources["/.well-known"] = well_known_resource(self)
admin_res = resources.get("/_synapse/admin")
if admin_res is not None:
admin.register_servlets(self, cast(JsonResource, admin_res))
else:
resources["/_synapse/admin"] = AdminRestResource(self)
elif name == "federation":
resources[FEDERATION_PREFIX] = TransportLayerServer(self)
elif name == "media":
@@ -203,21 +199,15 @@ class GenericWorkerServer(HomeServer):
# We need to serve the admin servlets for media on the
# worker.
admin_res = resources.get("/_synapse/admin")
if admin_res is not None:
register_servlets_for_media_repo(
self, cast(JsonResource, admin_res)
)
else:
admin_resource = JsonResource(self, canonical_json=False)
register_servlets_for_media_repo(self, admin_resource)
resources["/_synapse/admin"] = admin_resource
admin_resource = JsonResource(self, canonical_json=False)
register_servlets_for_media_repo(self, admin_resource)
resources.update(
{
MEDIA_R0_PREFIX: media_repo,
MEDIA_V3_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
"/_synapse/admin": admin_resource,
}
)


@@ -356,9 +356,6 @@ def _parse_oidc_config_dict(
additional_authorization_parameters=oidc_config.get(
"additional_authorization_parameters", {}
),
passthrough_authorization_parameters=oidc_config.get(
"passthrough_authorization_parameters", []
),
)
@@ -504,6 +501,3 @@ class OidcProviderConfig:
# Additional parameters that will be passed to the authorization grant URL
additional_authorization_parameters: Mapping[str, str]
# Allow query parameters to the redirect endpoint that will be passed to the authorization grant URL
passthrough_authorization_parameters: Collection[str]


@@ -40,8 +40,6 @@ import attr
from canonicaljson import encode_canonical_json
from synapse.api.constants import (
CANONICALJSON_MAX_INT,
CANONICALJSON_MIN_INT,
MAX_PDU_SIZE,
EventContentFields,
EventTypes,
@@ -63,6 +61,9 @@ SPLIT_FIELD_REGEX = re.compile(r"\\*\.")
# Find escaped characters, e.g. those with a \ in front of them.
ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)")
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
# Module API callback that allows adding fields to the unsigned section of
# events that are sent to clients.


@@ -86,7 +86,9 @@ class EventValidator:
# Depending on the room version, ensure the data is spec compliant JSON.
if event.room_version.strict_canonicaljson:
validate_canonicaljson(event.get_pdu_json())
# Note that only the client controlled portion of the event is
# checked, since we trust the portions of the event we created.
validate_canonicaljson(event.content)
if event.type == EventTypes.Aliases:
if "aliases" in event.content:


@@ -20,7 +20,7 @@
#
#
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Sequence
from typing import TYPE_CHECKING, Awaitable, Callable, Optional
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -29,7 +29,6 @@ from synapse.crypto.event_signing import check_event_content_hash
from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.federation.units import filter_pdus_for_valid_depth
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.opentracing import log_kv, trace
from synapse.types import JsonDict, get_domain_from_id
@@ -268,15 +267,6 @@ def _is_invite_via_3pid(event: EventBase) -> bool:
)
def parse_events_from_pdu_json(
pdus_json: Sequence[JsonDict], room_version: RoomVersion
) -> List[EventBase]:
return [
event_from_pdu_json(pdu_json, room_version)
for pdu_json in filter_pdus_for_valid_depth(pdus_json)
]
def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
"""Construct an EventBase from an event json received over federation


@@ -68,7 +68,6 @@ from synapse.federation.federation_base import (
FederationBase,
InvalidEventSignatureError,
event_from_pdu_json,
parse_events_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.client import is_unknown_endpoint
@@ -350,7 +349,7 @@ class FederationClient(FederationBase):
room_version = await self.store.get_room_version(room_id)
pdus = parse_events_from_pdu_json(transaction_data_pdus, room_version)
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
# Check signatures and hash of pdus, removing any from the list that fail checks
pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
@@ -394,7 +393,9 @@ class FederationClient(FederationBase):
transaction_data,
)
pdu_list = parse_events_from_pdu_json(transaction_data["pdus"], room_version)
pdu_list: List[EventBase] = [
event_from_pdu_json(p, room_version) for p in transaction_data["pdus"]
]
if pdu_list and pdu_list[0]:
pdu = pdu_list[0]
@@ -808,7 +809,7 @@ class FederationClient(FederationBase):
room_version = await self.store.get_room_version(room_id)
auth_chain = parse_events_from_pdu_json(res["auth_chain"], room_version)
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_chain, room_version=room_version
@@ -1528,7 +1529,9 @@ class FederationClient(FederationBase):
room_version = await self.store.get_room_version(room_id)
events = parse_events_from_pdu_json(content.get("events", []), room_version)
events = [
event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, events, room_version=room_version


@@ -66,7 +66,7 @@ from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus
from synapse.federation.units import Edu, Transaction
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -469,12 +469,7 @@ class FederationServer(FederationBase):
logger.info("Ignoring PDU: %s", e)
continue
try:
event = event_from_pdu_json(p, room_version)
except SynapseError as e:
logger.info("Ignoring PDU for failing to deserialize: %s", e)
continue
event = event_from_pdu_json(p, room_version)
pdus_by_room.setdefault(room_id, []).append(event)
if event.origin_server_ts > newest_pdu_ts:
@@ -641,8 +636,8 @@ class FederationServer(FederationBase):
)
return {
"pdus": serialize_and_filter_pdus(pdus),
"auth_chain": serialize_and_filter_pdus(auth_chain),
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
async def on_pdu_request(
@@ -766,8 +761,8 @@ class FederationServer(FederationBase):
event_json = event.get_pdu_json(time_now)
resp = {
"event": event_json,
"state": serialize_and_filter_pdus(state_events, time_now),
"auth_chain": serialize_and_filter_pdus(auth_chain_events, time_now),
"state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
"members_omitted": caller_supports_partial_state,
}
@@ -1010,7 +1005,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
auth_pdus = await self.handler.on_event_auth(event_id)
res = {"auth_chain": serialize_and_filter_pdus(auth_pdus, time_now)}
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return 200, res
async def on_query_client_keys(
@@ -1095,7 +1090,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
return {"events": serialize_and_filter_pdus(missing_events, time_now)}
return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
async def on_openid_userinfo(self, token: str) -> Optional[str]:
ts_now_ms = self._clock.time_msec()


@@ -24,12 +24,10 @@ server protocol.
"""
import logging
from typing import List, Optional, Sequence
from typing import List, Optional
import attr
from synapse.api.constants import CANONICALJSON_MAX_INT, CANONICALJSON_MIN_INT
from synapse.events import EventBase
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@@ -106,28 +104,8 @@ class Transaction:
result = {
"origin": self.origin,
"origin_server_ts": self.origin_server_ts,
"pdus": filter_pdus_for_valid_depth(self.pdus),
"pdus": self.pdus,
}
if self.edus:
result["edus"] = self.edus
return result
def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]:
filtered_pdus = []
for pdu in pdus:
# Drop PDUs that have a depth that is outside of the range allowed
# by canonical json.
if (
"depth" in pdu
and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
):
filtered_pdus.append(pdu)
return filtered_pdus
def serialize_and_filter_pdus(
pdus: Sequence[EventBase], time_now: Optional[int] = None
) -> List[JsonDict]:
return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus])
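A short, standalone illustration of the depth filtering above: PDUs whose depth is missing or falls outside the canonical-JSON integer range are dropped. The helper is re-declared locally for the example rather than imported from synapse.federation.units.

from typing import Any, Dict, List, Sequence

CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

def filter_pdus_for_valid_depth(pdus: Sequence[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # Keep only PDUs whose "depth" can be represented in canonical JSON.
    return [
        pdu
        for pdu in pdus
        if "depth" in pdu
        and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
    ]

pdus = [
    {"event_id": "$ok", "depth": 42},
    {"event_id": "$too_deep", "depth": 2**63 - 1},  # allowed by the old MAX_DEPTH, not by canonical JSON
    {"event_id": "$missing_depth"},
]
assert [p["event_id"] for p in filter_pdus_for_valid_depth(pdus)] == ["$ok"]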


@@ -163,8 +163,6 @@ class DeviceWorkerHandler:
raise errors.NotFoundError()
ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
device = dict(device)
_update_device_from_client_ips(device, ips)
set_tag("device", str(device))


@@ -467,10 +467,6 @@ class OidcProvider:
self._sso_handler.register_identity_provider(self)
self.passthrough_authorization_parameters = (
provider.passthrough_authorization_parameters
)
def _validate_metadata(self, m: OpenIDProviderMetadata) -> None:
"""Verifies the provider metadata.
@@ -1009,6 +1005,7 @@ class OidcProvider:
when everything is done (or None for UI Auth)
ui_auth_session_id: The session ID of the ongoing UI Auth (or
None if this is a login).
Returns:
The redirect URL to the authorization endpoint.
@@ -1081,13 +1078,6 @@ class OidcProvider:
)
)
# add passthrough additional authorization parameters
passthrough_authorization_parameters = self.passthrough_authorization_parameters
for parameter in passthrough_authorization_parameters:
parameter_value = parse_string(request, parameter)
if parameter_value:
additional_authorization_parameters.update({parameter: parameter_value})
authorization_endpoint = metadata.get("authorization_endpoint")
return prepare_grant_uri(
authorization_endpoint,


@@ -59,11 +59,7 @@ from synapse.media._base import (
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import (
MediaStorage,
SHA256TransparentIOReader,
SHA256TransparentIOWriter,
)
from synapse.media.media_storage import MediaStorage
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer
@@ -305,26 +301,15 @@ class MediaRepository:
auth_user: The user_id of the uploader
"""
file_info = FileInfo(server_name=None, file_id=media_id)
sha256reader = SHA256TransparentIOReader(content)
# This implements all of IO as it has a passthrough
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
sha256 = sha256reader.hexdigest()
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
fname = await self.media_storage.store_file(content, file_info)
logger.info("Stored local media in file %r", fname)
if should_quarantine:
logger.warn(
"Media has been automatically quarantined as it matched existing quarantined media"
)
await self.store.update_local_media(
media_id=media_id,
media_type=media_type,
upload_name=upload_name,
media_length=content_length,
user_id=auth_user,
sha256=sha256,
quarantined_by="system" if should_quarantine else None,
)
try:
@@ -357,19 +342,11 @@ class MediaRepository:
media_id = random_string(24)
file_info = FileInfo(server_name=None, file_id=media_id)
# This implements all of IO as it has a passthrough
sha256reader = SHA256TransparentIOReader(content)
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
sha256 = sha256reader.hexdigest()
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
fname = await self.media_storage.store_file(content, file_info)
logger.info("Stored local media in file %r", fname)
if should_quarantine:
logger.warn(
"Media has been automatically quarantined as it matched existing quarantined media"
)
await self.store.store_local_media(
media_id=media_id,
media_type=media_type,
@@ -377,9 +354,6 @@ class MediaRepository:
upload_name=upload_name,
media_length=content_length,
user_id=auth_user,
sha256=sha256,
# TODO: Better name?
quarantined_by="system" if should_quarantine else None,
)
try:
@@ -782,13 +756,11 @@ class MediaRepository:
file_info = FileInfo(server_name=server_name, file_id=file_id)
async with self.media_storage.store_into_file(file_info) as (f, fname):
sha256writer = SHA256TransparentIOWriter(f)
try:
length, headers = await self.client.download_media(
server_name,
media_id,
# This implements all of BinaryIO as it has a passthrough
output_stream=sha256writer.wrap(),
output_stream=f,
max_size=self.max_upload_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
@@ -853,7 +825,6 @@ class MediaRepository:
upload_name=upload_name,
media_length=length,
filesystem_id=file_id,
sha256=sha256writer.hexdigest(),
)
logger.info("Stored remote media in file %r", fname)
@@ -874,7 +845,6 @@ class MediaRepository:
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
async def _federation_download_remote_file(
@@ -909,13 +879,11 @@ class MediaRepository:
file_info = FileInfo(server_name=server_name, file_id=file_id)
async with self.media_storage.store_into_file(file_info) as (f, fname):
sha256writer = SHA256TransparentIOWriter(f)
try:
res = await self.client.federation_download_media(
server_name,
media_id,
# This implements all of BinaryIO as it has a passthrough
output_stream=sha256writer.wrap(),
output_stream=f,
max_size=self.max_upload_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
@@ -986,7 +954,6 @@ class MediaRepository:
upload_name=upload_name,
media_length=length,
filesystem_id=file_id,
sha256=sha256writer.hexdigest(),
)
logger.debug("Stored remote media in file %r", fname)
@@ -1007,7 +974,6 @@ class MediaRepository:
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
def _get_thumbnail_requirements(


@@ -19,7 +19,6 @@
#
#
import contextlib
import hashlib
import json
import logging
import os
@@ -71,88 +70,6 @@ logger = logging.getLogger(__name__)
CRLF = b"\r\n"
class SHA256TransparentIOWriter:
"""Will generate a SHA256 hash from a source stream transparently.
Args:
source: Source stream.
"""
def __init__(self, source: BinaryIO):
self._hash = hashlib.sha256()
self._source = source
def write(self, buffer: Union[bytes, bytearray]) -> int:
"""Wrapper for source.write()
Args:
buffer
Returns:
the value of source.write()
"""
res = self._source.write(buffer)
self._hash.update(buffer)
return res
def hexdigest(self) -> str:
"""The digest of the written or read value.
Returns:
The digest in hex format.
"""
return self._hash.hexdigest()
def wrap(self) -> BinaryIO:
# This class implements a subset of the IO interface and passes everything else through via __getattr__
return cast(BinaryIO, self)
# Passthrough any other calls
def __getattr__(self, attr_name: str) -> Any:
return getattr(self._source, attr_name)
class SHA256TransparentIOReader:
"""Will generate a SHA256 hash from a source stream transparently.
Args:
source: Source IO stream.
"""
def __init__(self, source: IO):
self._hash = hashlib.sha256()
self._source = source
def read(self, n: int = -1) -> bytes:
"""Wrapper for source.read()
Args:
n
Returns:
the value of source.read()
"""
bytes = self._source.read(n)
self._hash.update(bytes)
return bytes
def hexdigest(self) -> str:
"""The digest of the written or read value.
Returns:
The digest in hex format.
"""
return self._hash.hexdigest()
def wrap(self) -> IO:
# This class implements a subset of the IO interface and passes everything else through via __getattr__
return cast(IO, self)
# Passthrough any other calls
def __getattr__(self, attr_name: str) -> Any:
return getattr(self._source, attr_name)
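The two classes above wrap a stream, hash every byte that passes through write()/read(), and forward everything else to the wrapped object via __getattr__. Below is a minimal usage sketch of the same pattern using only the standard library; HashingWriter is illustrative and is not the SHA256TransparentIOWriter removed in this diff.

import hashlib
import io
from typing import Any, BinaryIO, Union

class HashingWriter:
    """Forward writes to a wrapped stream while hashing the bytes in passing."""

    def __init__(self, source: BinaryIO) -> None:
        self._hash = hashlib.sha256()
        self._source = source

    def write(self, buffer: Union[bytes, bytearray]) -> int:
        self._hash.update(buffer)
        return self._source.write(buffer)

    def hexdigest(self) -> str:
        return self._hash.hexdigest()

    def __getattr__(self, attr_name: str) -> Any:
        # Anything other than write()/hexdigest() is passed straight through.
        return getattr(self._source, attr_name)

buf = io.BytesIO()
writer = HashingWriter(buf)
writer.write(b"hello ")
writer.write(b"world")
assert buf.getvalue() == b"hello world"
assert writer.hexdigest() == hashlib.sha256(b"hello world").hexdigest()
# Non-write calls fall through to the wrapped stream:
assert writer.tell() == len(b"hello world")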
class MediaStorage:
"""Responsible for storing/fetching files from local sources.
@@ -190,6 +107,7 @@ class MediaStorage:
Returns:
the file path written to in the primary media store
"""
async with self.store_into_file(file_info) as (f, fname):
# Write to the main media repository
await self.write_to_file(source, f)


@@ -66,6 +66,7 @@ from synapse.types import (
from synapse.util.async_helpers import (
timeout_deferred,
)
from synapse.util.metrics import Measure
from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_client
@@ -519,22 +520,20 @@ class Notifier:
users = users or []
rooms = rooms or []
user_streams: Set[_NotifierUserStream] = set()
with Measure(self.clock, "on_new_event"):
user_streams: Set[_NotifierUserStream] = set()
log_kv(
{
"waking_up_explicit_users": len(users),
"waking_up_explicit_rooms": len(rooms),
"users": shortstr(users),
"rooms": shortstr(rooms),
"stream": stream_key,
"stream_id": new_token,
}
)
log_kv(
{
"waking_up_explicit_users": len(users),
"waking_up_explicit_rooms": len(rooms),
"users": shortstr(users),
"rooms": shortstr(rooms),
"stream": stream_key,
"stream_id": new_token,
}
)
# Only calculate which user streams to wake up if there are, in fact,
# any user streams registered.
if self.user_to_user_stream or self.room_to_user_streams:
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
@@ -566,25 +565,25 @@ class Notifier:
# We resolve all these deferreds in one go so that we only need to
# call `PreserveLoggingContext` once, as it has a bunch of overhead
# (to calculate performance stats)
if listeners:
with PreserveLoggingContext():
for listener in listeners:
listener.callback(current_token)
with PreserveLoggingContext():
for listener in listeners:
listener.callback(current_token)
if user_streams:
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
self.notify_replication()
self.notify_replication()
# Notify appservices.
try:
self.appservice_handler.notify_interested_services_ephemeral(
stream_key,
new_token,
users,
)
except Exception:
logger.exception("Error notifying application services of ephemeral events")
# Notify appservices.
try:
self.appservice_handler.notify_interested_services_ephemeral(
stream_key,
new_token,
users,
)
except Exception:
logger.exception(
"Error notifying application services of ephemeral events"
)
def on_new_replication_data(self) -> None:
"""Used to inform replication listeners that something has happened


@@ -205,12 +205,6 @@ class HttpPusher(Pusher):
if self._is_processing:
return
# Check if we are trying, but failing, to contact the pusher. If so, we
# don't try and start processing immediately and instead wait for the
# retry loop to try again later (which is controlled by the timer).
if self.failing_since and self.timed_call and self.timed_call.active():
return
run_as_background_process("httppush.process", self._process)
async def _process(self) -> None:


@@ -275,9 +275,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Register all the admin servlets.
"""
RoomRestServlet(hs).register(http_server)
# Admin servlets below may not work on workers.
# Admin servlets aren't registered on workers.
if hs.config.worker.worker_app is not None:
return
@@ -285,6 +283,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
BlockRoomRestServlet(hs).register(http_server)
ListRoomRestServlet(hs).register(http_server)
RoomStateRestServlet(hs).register(http_server)
RoomRestServlet(hs).register(http_server)
RoomRestV2Servlet(hs).register(http_server)
RoomMembersRestServlet(hs).register(http_server)
DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)


@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.device_handler = handler
self.auth_handler = hs.get_auth_handler()
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
self._is_main_process = hs.config.worker.worker_app is None
async def on_GET(
self, request: SynapseRequest, device_id: str
@@ -179,14 +179,6 @@ class DeviceRestServlet(RestServlet):
async def on_DELETE(
self, request: SynapseRequest, device_id: str
) -> Tuple[int, JsonDict]:
# Can only be run on main process, as changes to device lists must
# happen on main.
if not self._is_main_process:
error_message = "DELETE on /devices/ must be routed to main process"
logger.error(error_message)
raise SynapseError(500, error_message)
assert isinstance(self.device_handler, DeviceHandler)
requester = await self.auth.get_user_by_req(request)
try:
@@ -231,14 +223,6 @@ class DeviceRestServlet(RestServlet):
async def on_PUT(
self, request: SynapseRequest, device_id: str
) -> Tuple[int, JsonDict]:
# Can only be run on main process, as changes to device lists must
# happen on main.
if not self._is_main_process:
error_message = "PUT on /devices/ must be routed to main process"
logger.error(error_message)
raise SynapseError(500, error_message)
assert isinstance(self.device_handler, DeviceHandler)
requester = await self.auth.get_user_by_req(request, allow_guest=True)
body = parse_and_validate_json_object_from_request(request, self.PutBody)
@@ -601,9 +585,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
):
DeleteDevicesRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
if hs.config.worker.worker_app is None:
DeviceRestServlet(hs).register(http_server)
if hs.config.experimental.msc2697_enabled:
DehydratedDeviceServlet(hs).register(http_server)
ClaimDehydratedDeviceServlet(hs).register(http_server)


@@ -24,7 +24,7 @@ from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.ratelimiting import Ratelimiter
@@ -248,8 +248,9 @@ class SyncRestServlet(RestServlet):
await self._server_notices_sender.on_user_syncing(user.to_string())
# ignore the presence update if the ratelimit is exceeded but do not pause the request
allowed, _ = await self._presence_per_user_limiter.can_do_action(requester)
if not allowed:
try:
await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
except LimitExceededError:
affect_presence = False
logger.debug("User set_presence ratelimit exceeded; ignoring it.")
else:


@@ -21,19 +21,11 @@
import itertools
import logging
from typing import (
TYPE_CHECKING,
Collection,
Mapping,
Optional,
Set,
)
from typing import TYPE_CHECKING, Collection, Mapping, Set
from synapse.logging.context import nested_logging_context
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.database import LoggingTransaction
from synapse.storage.databases import Databases
from synapse.types.storage import _BackgroundUpdates
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -52,11 +44,6 @@ class PurgeEventsStorageController:
self._delete_state_groups_loop, 60 * 1000
)
self.stores.state.db_pool.updates.register_background_update_handler(
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE,
self._background_delete_unrefereneced_state_groups,
)
async def purge_room(self, room_id: str) -> None:
"""Deletes all record of a room"""
@@ -94,8 +81,7 @@ class PurgeEventsStorageController:
)
async def _find_unreferenced_groups(
self,
state_groups: Collection[int],
self, state_groups: Collection[int]
) -> Set[int]:
"""Used when purging history to figure out which state groups can be
deleted.
@@ -217,232 +203,3 @@ class PurgeEventsStorageController:
room_id,
groups_to_sequences,
)
async def _background_delete_unrefereneced_state_groups(
self, progress: dict, batch_size: int
) -> int:
"""This background update will slowly delete any unreferenced state groups"""
last_checked_state_group = progress.get("last_checked_state_group")
if last_checked_state_group is None:
# This is the first run.
last_checked_state_group = (
await self.stores.state.db_pool.simple_select_one_onecol(
table="state_groups",
keyvalues={},
retcol="MAX(id)",
allow_none=True,
desc="get_max_state_group",
)
)
if last_checked_state_group is None:
# There are no state groups so the background process is finished.
await self.stores.state.db_pool.updates._end_background_update(
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE
)
return batch_size
last_checked_state_group += 1
(
last_checked_state_group,
final_batch,
) = await self._delete_unreferenced_state_groups_batch(
last_checked_state_group,
batch_size,
)
if not final_batch:
# There are more state groups to check.
progress = {
"last_checked_state_group": last_checked_state_group,
}
await self.stores.state.db_pool.updates._background_update_progress(
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE,
progress,
)
else:
# This background process is finished.
await self.stores.state.db_pool.updates._end_background_update(
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE
)
return batch_size
async def _delete_unreferenced_state_groups_batch(
self,
last_checked_state_group: int,
batch_size: int,
) -> tuple[int, bool]:
"""Looks for unreferenced state groups starting from the last state group
checked and marks them for deletion.
Args:
last_checked_state_group: The last state group that was checked.
batch_size: How many state groups to process in this iteration.
Returns:
(last_checked_state_group, final_batch)
"""
# Find all state groups that can be deleted if any of the original set are deleted.
(
to_delete,
last_checked_state_group,
final_batch,
) = await self._find_unreferenced_groups_for_background_deletion(
last_checked_state_group, batch_size
)
if len(to_delete) == 0:
return last_checked_state_group, final_batch
await self.stores.state_deletion.mark_state_groups_as_pending_deletion(
to_delete
)
return last_checked_state_group, final_batch
async def _find_unreferenced_groups_for_background_deletion(
self,
last_checked_state_group: int,
batch_size: int,
) -> tuple[Set[int], int, bool]:
"""Used when deleting unreferenced state groups in the background to figure out
which state groups can be deleted.
To avoid increased DB usage due to de-deltaing state groups, this returns only
state groups which are free standing (ie. no shared edges with referenced groups) or
state groups which do not share edges which result in a future referenced group.
The following scenarios outline the possibilities based on state group data in
the DB.
ie. Free standing -> state groups 1-N would be returned:
SG_1
|
...
|
SG_N
ie. Previous reference -> state groups 2-N would be returned:
SG_1 <- referenced by event
|
SG_2
|
...
|
SG_N
ie. Future reference -> none of the following state groups would be returned:
SG_1
|
SG_2
|
...
|
SG_N <- referenced by event
Args:
last_checked_state_group: The last state group that was checked.
batch_size: How many state groups to process in this iteration.
Returns:
(to_delete, last_checked_state_group, final_batch)
"""
# If a state group's next edge is not pending deletion then we don't delete the state group.
# If there is no next edge or the next edges are all marked for deletion, then delete
# the state group.
# This holds since we walk backwards from the latest state groups, ensuring that
# we've already checked newer state groups for event references along the way.
def get_next_state_groups_marked_for_deletion_txn(
txn: LoggingTransaction,
) -> tuple[dict[int, bool], dict[int, int]]:
state_group_sql = """
SELECT s.id, e.state_group, d.state_group
FROM (
SELECT id FROM state_groups
WHERE id < ? ORDER BY id DESC LIMIT ?
) as s
LEFT JOIN state_group_edges AS e ON (s.id = e.prev_state_group)
LEFT JOIN state_groups_pending_deletion AS d ON (e.state_group = d.state_group)
"""
txn.execute(state_group_sql, (last_checked_state_group, batch_size))
# Mapping from state group to whether we should delete it.
state_groups_to_deletion: dict[int, bool] = {}
# Mapping from state group to prev state group.
state_groups_to_prev: dict[int, int] = {}
for row in txn:
state_group = row[0]
next_edge = row[1]
pending_deletion = row[2]
if next_edge is not None:
state_groups_to_prev[next_edge] = state_group
if next_edge is not None and not pending_deletion:
# We have found an edge not marked for deletion.
# Check previous results to see if this group is part of a chain
# within this batch that qualifies for deletion.
# ie. batch contains:
# SG_1 -> SG_2 -> SG_3
# If SG_3 is a candidate for deletion, then SG_2 & SG_1 should also
# be, even though they have edges which may not be marked for
# deletion.
# This relies on SQL results being sorted in DESC order to work.
next_is_deletion_candidate = state_groups_to_deletion.get(next_edge)
if (
next_is_deletion_candidate is None
or not next_is_deletion_candidate
):
state_groups_to_deletion[state_group] = False
else:
state_groups_to_deletion.setdefault(state_group, True)
else:
# This state group may be a candidate for deletion
state_groups_to_deletion.setdefault(state_group, True)
return state_groups_to_deletion, state_groups_to_prev
(
state_groups_to_deletion,
state_group_edges,
) = await self.stores.state.db_pool.runInteraction(
"get_next_state_groups_marked_for_deletion",
get_next_state_groups_marked_for_deletion_txn,
)
deletion_candidates = {
state_group
for state_group, deletion in state_groups_to_deletion.items()
if deletion
}
final_batch = False
state_groups = state_groups_to_deletion.keys()
if len(state_groups) < batch_size:
final_batch = True
else:
last_checked_state_group = min(state_groups)
if len(state_groups) == 0:
return set(), last_checked_state_group, final_batch
# Determine if any of the remaining state groups are directly referenced.
referenced = await self.stores.main.get_referenced_state_groups(
deletion_candidates
)
# Remove state groups from deletion_candidates which are directly referenced or share a
# future edge with a referenced state group within this batch.
def filter_reference_chains(group: Optional[int]) -> None:
while group is not None:
deletion_candidates.discard(group)
group = state_group_edges.get(group)
for referenced_group in referenced:
filter_reference_chains(referenced_group)
return deletion_candidates, last_checked_state_group, final_batch
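The final chain-filtering step is easiest to see on a toy graph. The sketch below (plain Python, not Synapse code) shows how every deletion candidate on a chain that leads to a referenced state group is discarded, leaving only free-standing groups.

from typing import Dict, Optional, Set

# Toy edge map: each state group maps to the previous (older) group it was built
# from, which is the direction the walk below follows. Chain: SG 1 <- 2 <- 3,
# where SG 3 is still referenced by an event; SG 10 is free-standing.
state_group_edges: Dict[int, int] = {3: 2, 2: 1}
deletion_candidates: Set[int] = {1, 2, 3, 10}
referenced: Set[int] = {3}

def filter_reference_chains(group: Optional[int]) -> None:
    # Walk backwards from a referenced group, dropping everything on its chain.
    while group is not None:
        deletion_candidates.discard(group)
        group = state_group_edges.get(group)

for referenced_group in referenced:
    filter_reference_chains(referenced_group)

# Only the free-standing group survives as a deletion candidate.
assert deletion_candidates == {10}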


@@ -282,10 +282,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
"count_devices_by_users", count_devices_by_users_txn, user_ids
)
@cached()
async def get_device(
self, user_id: str, device_id: str
) -> Optional[Mapping[str, Any]]:
) -> Optional[Dict[str, Any]]:
"""Retrieve a device. Only returns devices that are not marked as
hidden.
@@ -1818,8 +1817,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
},
desc="store_device",
)
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
if not inserted:
# if the device already exists, check if it's a real device, or
# if the device ID is reserved by something else
@@ -1885,9 +1882,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
values=device_ids,
keyvalues={"user_id": user_id},
)
self._invalidate_cache_and_stream_bulk(
txn, self.get_device, [(user_id, device_id) for device_id in device_ids]
)
for batch in batch_iter(device_ids, 100):
await self.db_pool.runInteraction(
@@ -1921,7 +1915,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
updatevalues=updates,
desc="update_device",
)
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
async def update_remote_device_list_cache_entry(
self, user_id: str, device_id: str, content: JsonDict, stream_id: str


@@ -19,7 +19,6 @@
# [This file includes modifications made by New Vector Limited]
#
#
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
@@ -52,8 +51,6 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = (
"media_repository_drop_index_wo_method_2"
)
logger = logging.getLogger(__name__)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class LocalMedia:
@@ -68,7 +65,6 @@ class LocalMedia:
safe_from_quarantine: bool
user_id: Optional[str]
authenticated: Optional[bool]
sha256: Optional[str]
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -83,7 +79,6 @@ class RemoteMedia:
last_access_ts: int
quarantined_by: Optional[str]
authenticated: Optional[bool]
sha256: Optional[str]
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -159,26 +154,6 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
unique=True,
)
self.db_pool.updates.register_background_index_update(
update_name="local_media_repository_sha256_idx",
index_name="local_media_repository_sha256",
table="local_media_repository",
where_clause="sha256 IS NOT NULL",
columns=[
"sha256",
],
)
self.db_pool.updates.register_background_index_update(
update_name="remote_media_cache_sha256_idx",
index_name="remote_media_cache_sha256",
table="remote_media_cache",
where_clause="sha256 IS NOT NULL",
columns=[
"sha256",
],
)
self.db_pool.updates.register_background_update_handler(
BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2,
self._drop_media_index_without_method,
@@ -246,7 +221,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"safe_from_quarantine",
"user_id",
"authenticated",
"sha256",
),
allow_none=True,
desc="get_local_media",
@@ -265,7 +239,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
safe_from_quarantine=row[7],
user_id=row[8],
authenticated=row[9],
sha256=row[10],
)
async def get_local_media_by_user_paginate(
@@ -322,8 +295,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
quarantined_by,
safe_from_quarantine,
user_id,
authenticated,
sha256
authenticated
FROM local_media_repository
WHERE user_id = ?
ORDER BY {order_by_column} {order}, media_id ASC
@@ -348,7 +320,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
safe_from_quarantine=bool(row[8]),
user_id=row[9],
authenticated=row[10],
sha256=row[11],
)
for row in txn
]
@@ -478,8 +449,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
media_length: int,
user_id: UserID,
url_cache: Optional[str] = None,
sha256: Optional[str] = None,
quarantined_by: Optional[str] = None,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
@@ -497,8 +466,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"user_id": user_id.to_string(),
"url_cache": url_cache,
"authenticated": authenticated,
"sha256": sha256,
"quarantined_by": quarantined_by,
},
desc="store_local_media",
)
@@ -510,28 +477,20 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
upload_name: Optional[str],
media_length: int,
user_id: UserID,
sha256: str,
url_cache: Optional[str] = None,
quarantined_by: Optional[str] = None,
) -> None:
updatevalues = {
"media_type": media_type,
"upload_name": upload_name,
"media_length": media_length,
"url_cache": url_cache,
"sha256": sha256,
}
# This should never be un-set by this function.
if quarantined_by is not None:
updatevalues["quarantined_by"] = quarantined_by
await self.db_pool.simple_update_one(
"local_media_repository",
keyvalues={
"user_id": user_id.to_string(),
"media_id": media_id,
},
updatevalues=updatevalues,
updatevalues={
"media_type": media_type,
"upload_name": upload_name,
"media_length": media_length,
"url_cache": url_cache,
},
desc="update_local_media",
)
@@ -698,7 +657,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"last_access_ts",
"quarantined_by",
"authenticated",
"sha256",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -716,7 +674,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts=row[5],
quarantined_by=row[6],
authenticated=row[7],
sha256=row[8],
)
async def store_cached_remote_media(
@@ -728,7 +685,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
time_now_ms: int,
upload_name: Optional[str],
filesystem_id: str,
sha256: Optional[str],
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
@@ -747,7 +703,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"filesystem_id": filesystem_id,
"last_access_ts": time_now_ms,
"authenticated": authenticated,
"sha256": sha256,
},
desc="store_cached_remote_media",
)
@@ -991,46 +946,3 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
await self.db_pool.runInteraction(
"delete_url_cache_media", _delete_url_cache_media_txn
)
async def get_is_hash_quarantined(self, sha256: str) -> bool:
"""Get whether a specific sha256 hash digest matches any quarantined media.
Returns:
True if the hash matches any quarantined media, False otherwise (including while the index is still being built).
"""
# If we don't have the index yet, performance tanks, so we return False.
# In the background updates, remote_media_cache_sha256_idx is created
# after local_media_repository_sha256_idx, which is why we only need to
# check for the completion of the former.
if not await self.db_pool.updates.has_completed_background_update(
"remote_media_cache_sha256_idx"
):
return False
def get_matching_media_txn(
txn: LoggingTransaction, table: str, sha256: str
) -> bool:
# Return on first match
sql = """
SELECT 1
FROM local_media_repository
WHERE sha256 = ? AND quarantined_by IS NOT NULL
UNION ALL
SELECT 1
FROM remote_media_cache
WHERE sha256 = ? AND quarantined_by IS NOT NULL
LIMIT 1
"""
txn.execute(sql, (sha256, sha256))
row = txn.fetchone()
return row is not None
return await self.db_pool.runInteraction(
"get_matching_media_txn",
get_matching_media_txn,
"local_media_repository",
sha256,
)


@@ -759,37 +759,17 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
external_id: id on that system
user_id: complete mxid that it is mapped to
"""
self._invalidate_cache_and_stream(
txn, self.get_user_by_external_id, (auth_provider, external_id)
)
# This INSERT ... ON CONFLICT DO NOTHING statement will cause a
# 'could not serialize access due to concurrent update'
# if the row is added concurrently by another transaction.
# This is exactly what we want, as it makes the transaction get retried
# in a new snapshot where we can check for a genuine conflict.
was_inserted = self.db_pool.simple_upsert_txn(
self.db_pool.simple_insert_txn(
txn,
table="user_external_ids",
keyvalues={"auth_provider": auth_provider, "external_id": external_id},
values={},
insertion_values={"user_id": user_id},
values={
"auth_provider": auth_provider,
"external_id": external_id,
"user_id": user_id,
},
)
if not was_inserted:
existing_id = self.db_pool.simple_select_one_onecol_txn(
txn,
table="user_external_ids",
keyvalues={"auth_provider": auth_provider, "user_id": user_id},
retcol="external_id",
allow_none=True,
)
if existing_id != external_id:
raise ExternalIDReuseException(
f"{user_id!r} has external id {existing_id!r} for {auth_provider} but trying to add {external_id!r}"
)
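The INSERT ... ON CONFLICT DO NOTHING approach described in the comment above can be illustrated end to end with SQLite, which supports the same clause. This is a hedged sketch of the idea only; the table shape follows the diff, but the helper, its conflict check, and the error type are simplifications rather than the Synapse storage API.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE user_external_ids (
        auth_provider TEXT NOT NULL,
        external_id   TEXT NOT NULL,
        user_id       TEXT NOT NULL,
        UNIQUE (auth_provider, external_id)
    )
    """
)

def record_external_id(auth_provider: str, external_id: str, user_id: str) -> None:
    # The insert silently does nothing on conflict; afterwards we check whether
    # the row that "won" actually maps to the user we were asked to record.
    conn.execute(
        "INSERT INTO user_external_ids (auth_provider, external_id, user_id) "
        "VALUES (?, ?, ?) ON CONFLICT (auth_provider, external_id) DO NOTHING",
        (auth_provider, external_id, user_id),
    )
    row = conn.execute(
        "SELECT user_id FROM user_external_ids WHERE auth_provider = ? AND external_id = ?",
        (auth_provider, external_id),
    ).fetchone()
    if row[0] != user_id:
        raise ValueError(f"{external_id!r} already maps to {row[0]!r}")

record_external_id("oidc", "alice-sub", "@alice:example.org")
record_external_id("oidc", "alice-sub", "@alice:example.org")  # idempotent
try:
    record_external_id("oidc", "alice-sub", "@bob:example.org")  # reuse is rejected
except ValueError:
    pass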
async def remove_user_external_id(
self, auth_provider: str, external_id: str, user_id: str
) -> None:
@@ -809,9 +789,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
},
desc="remove_user_external_id",
)
await self.invalidate_cache_and_stream(
"get_user_by_external_id", (auth_provider, external_id)
)
async def replace_user_external_id(
self,
@@ -832,20 +809,29 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
ExternalIDReuseException if the new external_id could not be mapped.
"""
def _replace_user_external_id_txn(
def _remove_user_external_ids_txn(
txn: LoggingTransaction,
user_id: str,
) -> None:
"""Remove all mappings from external user ids to a mxid
If these mappings are not found, this method does nothing.
Args:
user_id: complete mxid that it is mapped to
"""
self.db_pool.simple_delete_txn(
txn,
table="user_external_ids",
keyvalues={"user_id": user_id},
)
for auth_provider, external_id in record_external_ids:
self._invalidate_cache_and_stream(
txn, self.get_user_by_external_id, (auth_provider, external_id)
)
def _replace_user_external_id_txn(
txn: LoggingTransaction,
) -> None:
_remove_user_external_ids_txn(txn, user_id)
for auth_provider, external_id in record_external_ids:
self._record_user_external_id_txn(
txn,
auth_provider,
@@ -861,7 +847,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
except self.database_engine.module.IntegrityError:
raise ExternalIDReuseException()
@cached()
async def get_user_by_external_id(
self, auth_provider: str, external_id: str
) -> Optional[str]:


@@ -51,15 +51,11 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
from synapse.storage._base import (
db_to_json,
make_in_list_sql_clause,
)
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_tuple_in_list_sql_clause,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.types import Cursor
@@ -1131,109 +1127,6 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return local_media_ids
def _quarantine_local_media_txn(
self,
txn: LoggingTransaction,
hashes: Set[str],
media_ids: Set[str],
quarantined_by: Optional[str],
) -> int:
"""Quarantine and unquarantine local media items.
Args:
txn (cursor)
hashes: A set of sha256 hashes for any media that should be quarantined
media_ids: A set of media IDs for any media that should be quarantined
quarantined_by: The ID of the user who initiated the quarantine request
If it is `None` media will be removed from quarantine
Returns:
The total number of media items quarantined
"""
total_media_quarantined = 0
# Effectively a legacy path, update any media that was explicitly named.
if media_ids:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "media_id", media_ids
)
sql = f"""
UPDATE local_media_repository
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
# Update any media that was identified via hash.
if hashes:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "sha256", hashes
)
sql = f"""
UPDATE local_media_repository
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
def _quarantine_remote_media_txn(
self,
txn: LoggingTransaction,
hashes: Set[str],
media: Set[Tuple[str, str]],
quarantined_by: Optional[str],
) -> int:
"""Quarantine and unquarantine remote items
Args:
txn (cursor)
hashes: A set of sha256 hashes for any media that should be quarantined
media_ids: A set of tuples (media_origin, media_id) for any media that should be quarantined
quarantined_by: The ID of the user who initiated the quarantine request
If it is `None` media will be removed from quarantine
Returns:
The total number of media items quarantined
"""
total_media_quarantined = 0
if media:
sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
txn.database_engine,
("media_origin", "media_id"),
media,
)
sql = f"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE {sql_in_list_clause}"""
txn.execute(sql, [quarantined_by] + sql_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
total_media_quarantined = 0
if hashes:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "sha256", hashes
)
sql = f"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
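The helpers used above (make_in_list_sql_clause and make_tuple_in_list_sql_clause) come from Synapse's storage layer and their exact output is engine-specific. As a rough, simplified illustration of what such a clause builder does (plain placeholder expansion only, no Postgres array handling), here is a hypothetical version:

from typing import Iterable, List, Sequence, Tuple

def simple_in_list_clause(column: str, values: Iterable[object]) -> Tuple[str, List[object]]:
    """Build "column IN (?, ?, ...)" plus the matching argument list."""
    args = list(values)
    placeholders = ", ".join("?" for _ in args)
    return f"{column} IN ({placeholders})", args

def simple_tuple_in_list_clause(
    columns: Sequence[str], values: Iterable[Sequence[object]]
) -> Tuple[str, List[object]]:
    """Build "(col_a, col_b) IN ((?, ?), ...)" plus a flattened argument list."""
    rows = [tuple(v) for v in values]
    row_placeholder = "(" + ", ".join("?" for _ in columns) + ")"
    placeholders = ", ".join(row_placeholder for _ in rows)
    args = [item for row in rows for item in row]
    return f"({', '.join(columns)}) IN ({placeholders})", args

# Callers are expected to guard against empty collections, as the code above does.
clause, args = simple_in_list_clause("media_id", ["abc", "def"])
sql = f"UPDATE local_media_repository SET quarantined_by = ? WHERE {clause}"
# e.g. txn.execute(sql, ["@admin:example.org"] + args)

clause, args = simple_tuple_in_list_clause(("media_origin", "media_id"), [("remote.org", "abc")])
# clause == "(media_origin, media_id) IN ((?, ?))", args == ["remote.org", "abc"]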
def _quarantine_media_txn(
self,
txn: LoggingTransaction,
@@ -1253,49 +1146,40 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
Returns:
The total number of media items quarantined
"""
hashes = set()
media_ids = set()
remote_media = set()
# First, determine the hashes of the media we want to quarantine.
# We also want the media_ids for any media that lacks a hash.
if local_mxcs:
hash_sql_many_clause_sql, hash_sql_many_clause_args = (
make_in_list_sql_clause(txn.database_engine, "media_id", local_mxcs)
# Update all the tables to set the quarantined_by flag
sql = """
UPDATE local_media_repository
SET quarantined_by = ?
WHERE media_id = ?
"""
# set quarantine
if quarantined_by is not None:
sql += "AND safe_from_quarantine = FALSE"
txn.executemany(
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
)
hash_sql = f"SELECT sha256, media_id FROM local_media_repository WHERE {hash_sql_many_clause_sql}"
if quarantined_by is not None:
hash_sql += " AND safe_from_quarantine = FALSE"
txn.execute(hash_sql, hash_sql_many_clause_args)
for sha256, media_id in txn:
if sha256:
hashes.add(sha256)
else:
media_ids.add(media_id)
# Do the same for remote media
if remote_mxcs:
hash_sql_in_list_clause, hash_sql_args = make_tuple_in_list_sql_clause(
txn.database_engine,
("media_origin", "media_id"),
remote_mxcs,
# remove from quarantine
else:
txn.executemany(
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
)
hash_sql = f"SELECT sha256, media_origin, media_id FROM remote_media_cache WHERE {hash_sql_in_list_clause}"
txn.execute(hash_sql, hash_sql_args)
for sha256, media_origin, media_id in txn:
if sha256:
hashes.add(sha256)
else:
remote_media.add((media_origin, media_id))
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0
count = self._quarantine_local_media_txn(txn, hashes, media_ids, quarantined_by)
count += self._quarantine_remote_media_txn(
txn, hashes, remote_media, quarantined_by
txn.executemany(
"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE media_origin = ? AND media_id = ?
""",
[(quarantined_by, origin, media_id) for origin, media_id in remote_mxcs],
)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return count
return total_media_quarantined
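Taken together, the hash-based path compared above boils down to: look up the sha256 of each named media item, quarantine every row carrying one of those hashes, and fall back to the media id for rows that have no stored hash. A toy sqlite3 sketch of that flow, using a pared-down, hypothetical schema rather than Synapse's real tables:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE local_media_repository (
        media_id TEXT PRIMARY KEY,
        sha256 TEXT,
        quarantined_by TEXT,
        safe_from_quarantine BOOLEAN NOT NULL DEFAULT FALSE
    );
    INSERT INTO local_media_repository (media_id, sha256) VALUES
        ('aaa', 'hash1'), ('bbb', 'hash1'), ('ccc', NULL);
    """
)

def quarantine_local(conn, media_ids, quarantined_by):
    # 1. Resolve the hashes of the named media; media without a hash is handled by id.
    placeholders = ", ".join("?" for _ in media_ids)
    hashes, plain_ids = set(), set()
    for sha256, media_id in conn.execute(
        f"SELECT sha256, media_id FROM local_media_repository WHERE media_id IN ({placeholders})",
        list(media_ids),
    ):
        (hashes if sha256 else plain_ids).add(sha256 or media_id)
    # 2. Quarantine everything sharing one of those hashes, plus the hash-less ids.
    total = 0
    if hashes:
        ph = ", ".join("?" for _ in hashes)
        cur = conn.execute(
            f"UPDATE local_media_repository SET quarantined_by = ? "
            f"WHERE sha256 IN ({ph}) AND safe_from_quarantine = FALSE",
            [quarantined_by, *hashes],
        )
        total += max(cur.rowcount, 0)
    if plain_ids:
        ph = ", ".join("?" for _ in plain_ids)
        cur = conn.execute(
            f"UPDATE local_media_repository SET quarantined_by = ? "
            f"WHERE media_id IN ({ph}) AND safe_from_quarantine = FALSE",
            [quarantined_by, *plain_ids],
        )
        total += max(cur.rowcount, 0)
    return total

print(quarantine_local(conn, ["aaa", "ccc"], "@admin:example.org"))  # quarantines aaa, bbb and ccc -> 3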
async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked.

View File

@@ -1622,11 +1622,14 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
sql = """
UPDATE room_memberships
SET participant = true
WHERE event_id IN (
SELECT event_id FROM local_current_membership
WHERE user_id = ? AND room_id = ?
WHERE (user_id, room_id) IN (
SELECT user_id, room_id
FROM room_memberships
WHERE user_id = ?
AND room_id = ?
ORDER BY event_stream_ordering DESC
LIMIT 1
)
AND NOT participant
"""
txn.execute(sql, (user_id, room_id))
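The UPDATE above targets the user's most recent membership row for the room (highest event_stream_ordering). A toy sqlite3 sketch of the same "touch only the latest row per (user, room)" idea, keyed on event_id for simplicity and using a hypothetical, cut-down schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE room_memberships (
        event_id TEXT PRIMARY KEY,
        user_id TEXT,
        room_id TEXT,
        event_stream_ordering INTEGER,
        participant BOOLEAN NOT NULL DEFAULT FALSE
    );
    INSERT INTO room_memberships VALUES
        ('$join',  '@alice:hs', '!room:hs', 10, FALSE),
        ('$leave', '@alice:hs', '!room:hs', 20, FALSE);
    """
)

# Flag only the most recent membership row for this user/room as a participant.
conn.execute(
    """
    UPDATE room_memberships
    SET participant = TRUE
    WHERE event_id = (
        SELECT event_id FROM room_memberships
        WHERE user_id = ? AND room_id = ?
        ORDER BY event_stream_ordering DESC
        LIMIT 1
    )
    """,
    ("@alice:hs", "!room:hs"),
)
print(conn.execute("SELECT event_id, participant FROM room_memberships").fetchall())
# [('$join', 0), ('$leave', 1)]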
@@ -1648,10 +1651,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
) -> bool:
sql = """
SELECT participant
FROM local_current_membership AS l
INNER JOIN room_memberships AS r USING (event_id)
WHERE l.user_id = ?
AND l.room_id = ?
FROM room_memberships
WHERE user_id = ?
AND room_id = ?
ORDER BY event_stream_ordering DESC
LIMIT 1
"""
txn.execute(sql, (user_id, room_id))
res = txn.fetchone()

View File

@@ -20,15 +20,7 @@
#
import logging
from typing import (
TYPE_CHECKING,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union
from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore

View File

@@ -321,42 +321,18 @@ class StateDeletionDataStore:
async def mark_state_groups_as_pending_deletion(
self, state_groups: Collection[int]
) -> None:
"""Mark the given state groups as pending deletion.
If any of the state groups are already pending deletion, then those records are
left as is.
"""
await self.db_pool.runInteraction(
"mark_state_groups_as_pending_deletion",
self._mark_state_groups_as_pending_deletion_txn,
state_groups,
)
def _mark_state_groups_as_pending_deletion_txn(
self,
txn: LoggingTransaction,
state_groups: Collection[int],
) -> None:
sql = """
INSERT INTO state_groups_pending_deletion (state_group, insertion_ts)
VALUES %s
ON CONFLICT (state_group)
DO NOTHING
"""
"""Mark the given state groups as pending deletion"""
now = self._clock.time_msec()
rows = [
(
state_group,
now,
)
for state_group in state_groups
]
if isinstance(txn.database_engine, PostgresEngine):
txn.execute_values(sql % ("?",), rows, fetch=False)
else:
txn.execute_batch(sql % ("(?, ?)",), rows)
await self.db_pool.simple_upsert_many(
table="state_groups_pending_deletion",
key_names=("state_group",),
key_values=[(state_group,) for state_group in state_groups],
value_names=("insertion_ts",),
value_values=[(now,) for _ in state_groups],
desc="mark_state_groups_as_pending_deletion",
)
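The raw-SQL variant shown above relies on ON CONFLICT ... DO NOTHING, so a state group that is already pending deletion keeps its original insertion_ts. A small sqlite3 illustration of that behaviour (simplified schema, hypothetical helper name):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE state_groups_pending_deletion (state_group INTEGER PRIMARY KEY, insertion_ts INTEGER)"
)

def mark_pending_deletion(conn, state_groups, now_ms):
    # Re-marking an already-pending state group leaves its original timestamp untouched.
    conn.executemany(
        "INSERT INTO state_groups_pending_deletion (state_group, insertion_ts) "
        "VALUES (?, ?) ON CONFLICT (state_group) DO NOTHING",
        [(sg, now_ms) for sg in state_groups],
    )

mark_pending_deletion(conn, [1, 2], now_ms=1000)
mark_pending_deletion(conn, [2, 3], now_ms=2000)  # state group 2 keeps ts 1000
print(conn.execute("SELECT * FROM state_groups_pending_deletion ORDER BY state_group").fetchall())
# [(1, 1000), (2, 1000), (3, 2000)]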
async def mark_state_groups_as_used(self, state_groups: Collection[int]) -> None:
"""Mark the given state groups as now being referenced"""

View File

@@ -48,7 +48,6 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
@@ -915,12 +914,6 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
) -> None:
# Delete all edges that reference a state group linked to room_id
logger.info("[purge] removing %s from state_group_edges", room_id)
if isinstance(self.database_engine, PostgresEngine):
# Disable statement timeouts for this transaction; purging rooms can
# take a while!
txn.execute("SET LOCAL statement_timeout = 0")
txn.execute(
"""
DELETE FROM state_group_edges AS sge WHERE sge.state_group IN (

View File

@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 91 # remember to update the list below when updating
SCHEMA_VERSION = 90 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -161,7 +161,6 @@ Changes in SCHEMA_VERSION = 89
Changes in SCHEMA_VERSION = 90
- Add a column `participant` to `room_memberships` table
- Add background update to delete unreferenced state groups.
"""

View File

@@ -1,28 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Store the SHA256 content hash of media files.
ALTER TABLE local_media_repository ADD COLUMN sha256 TEXT;
ALTER TABLE remote_media_cache ADD COLUMN sha256 TEXT;
-- Add a background updates to handle creating the new index.
--
-- Note that the ordering of the update does not follow the usual scheme. This
-- is because, when upgrading from Synapse 1.127, this index is fairly important
-- to have in place quickly so that it doesn't tank performance, which is why it
-- is scheduled before the other background updates in the 1.127 -> 1.128 upgrade
INSERT INTO
background_updates (ordering, update_name, progress_json)
VALUES
(8890, 'local_media_repository_sha256_idx', '{}'),
(8891, 'remote_media_cache_sha256_idx', '{}');

View File

@@ -1,16 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add a background update to delete any unreferenced state groups
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9002, 'mark_unreferenced_state_groups_for_deletion_bg_update', '{}');

View File

@@ -1,15 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Remove the old unreferenced state group deletion background update if it exists
DELETE FROM background_updates WHERE update_name = 'delete_unreferenced_state_groups_bg_update';

View File

@@ -48,7 +48,3 @@ class _BackgroundUpdates:
SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE = (
"sliding_sync_membership_snapshots_fix_forgotten_column_bg_update"
)
MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = (
"mark_unreferenced_state_groups_for_deletion_bg_update"
)

View File

@@ -101,13 +101,7 @@ class ResponseCache(Generic[KV]):
used rather than trying to compute a new response.
"""
def __init__(
self,
clock: Clock,
name: str,
timeout_ms: float = 0,
enable_logging: bool = True,
):
def __init__(self, clock: Clock, name: str, timeout_ms: float = 0):
self._result_cache: Dict[KV, ResponseCacheEntry] = {}
self.clock = clock
@@ -115,7 +109,6 @@ class ResponseCache(Generic[KV]):
self._name = name
self._metrics = register_cache("response_cache", name, self, resizable=False)
self._enable_logging = enable_logging
def size(self) -> int:
return len(self._result_cache)
@@ -253,12 +246,9 @@ class ResponseCache(Generic[KV]):
"""
entry = self._get(key)
if not entry:
if self._enable_logging:
logger.debug(
"[%s]: no cached result for [%s], calculating new one",
self._name,
key,
)
logger.debug(
"[%s]: no cached result for [%s], calculating new one", self._name, key
)
context = ResponseCacheContext(cache_key=key)
if cache_context:
kwargs["cache_context"] = context
@@ -279,15 +269,12 @@ class ResponseCache(Generic[KV]):
return await make_deferred_yieldable(entry.result.observe())
result = entry.result.observe()
if self._enable_logging:
if result.called:
logger.info(
"[%s]: using completed cached result for [%s]", self._name, key
)
else:
logger.info(
"[%s]: using incomplete cached result for [%s]", self._name, key
)
if result.called:
logger.info("[%s]: using completed cached result for [%s]", self._name, key)
else:
logger.info(
"[%s]: using incomplete cached result for [%s]", self._name, key
)
span_context = entry.opentracing_span_context
with start_active_span_follows_from(

View File

@@ -220,7 +220,9 @@ class TestRatelimiter(unittest.HomeserverTestCase):
self.assertIn("test_id_1", limiter.actions)
self.reactor.advance(60)
self.get_success_or_raise(
limiter.can_do_action(None, key="test_id_2", _time_now_s=10)
)
self.assertNotIn("test_id_1", limiter.actions)

View File

@@ -539,44 +539,6 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
self.assertEqual(error.value.code, 503)
def test_cached_expired_introspection(self) -> None:
"""The handler should raise an error if the introspection response gives
an expiry time, the introspection response is cached and then the entry is
re-requested after it has expired."""
self.http_client.request = introspection_mock = AsyncMock(
return_value=FakeResponse.json(
code=200,
payload={
"active": True,
"sub": SUBJECT,
"scope": " ".join(
[
MATRIX_USER_SCOPE,
f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC",
]
),
"username": USERNAME,
"expires_in": 60,
},
)
)
request = Mock(args={})
request.args[b"access_token"] = [b"mockAccessToken"]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
# The first CS-API request causes a successful introspection
self.get_success(self.auth.get_user_by_req(request))
self.assertEqual(introspection_mock.call_count, 1)
# Sleep for 60 seconds so the token expires.
self.reactor.advance(60.0)
# Now the CS-API request fails because the token expired
self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
# Ensure another introspection request was not sent
self.assertEqual(introspection_mock.call_count, 1)
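The test above pins down the caching behaviour: while the introspection result's expires_in has not elapsed, no new introspection request is made; once it has elapsed, the cached entry is rejected rather than silently re-fetched. A rough, standalone sketch of that kind of expiry-aware cache follows (a hypothetical helper for illustration, not the MSC3861 handler or its real API):

import time
from typing import Callable, Dict, Optional, Tuple

class IntrospectionCache:
    """Toy cache: remember each token's introspection result until it expires."""

    def __init__(self, introspect: Callable[[str], dict], clock: Callable[[], float] = time.monotonic):
        self._introspect = introspect
        self._clock = clock
        self._entries: Dict[str, Tuple[dict, Optional[float]]] = {}

    def get(self, token: str) -> dict:
        cached = self._entries.get(token)
        if cached is not None:
            result, expires_at = cached
            if expires_at is None or self._clock() < expires_at:
                return result
            # Expired: reject without sending another introspection request.
            raise LookupError("cached introspection result has expired")
        result = self._introspect(token)
        expires_in = result.get("expires_in")
        expires_at = self._clock() + expires_in if expires_in is not None else None
        self._entries[token] = (result, expires_at)
        return result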
def make_device_keys(self, user_id: str, device_id: str) -> JsonDict:
# We only generate a master key to simplify the test.
master_signing_key = generate_signing_key(device_id)

View File

@@ -484,32 +484,6 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertEqual(code_verifier, "")
self.assertEqual(redirect, "http://client/redirect")
@override_config(
{
"oidc_config": {
**DEFAULT_CONFIG,
"passthrough_authorization_parameters": ["additional_parameter"],
}
}
)
def test_passthrough_parameters(self) -> None:
"""The redirect request has additional parameters, one is authorized, one is not"""
req = Mock(spec=["cookies", "args"])
req.cookies = []
req.args = {}
req.args[b"additional_parameter"] = ["a_value".encode("utf-8")]
req.args[b"not_authorized_parameter"] = ["any".encode("utf-8")]
url = urlparse(
self.get_success(
self.provider.handle_redirect_request(req, b"http://client/redirect")
)
)
params = parse_qs(url.query)
self.assertEqual(params["additional_parameter"], ["a_value"])
self.assertNotIn("not_authorized_parameters", params)
@override_config({"oidc_config": DEFAULT_CONFIG})
def test_redirect_request_with_code_challenge(self) -> None:
"""The redirect request has the right arguments & generates a valid session cookie."""

View File

@@ -369,7 +369,6 @@ class ProfileTestCase(unittest.HomeserverTestCase):
time_now_ms=self.clock.time_msec(),
upload_name=None,
filesystem_id="xyz",
sha256="abcdefg12345",
)
)

View File

@@ -31,9 +31,6 @@ from synapse.rest.client import login, register, room
from synapse.server import HomeServer
from synapse.types import UserID
from synapse.util import Clock
from synapse.util.stringutils import (
random_string,
)
from tests import unittest
from tests.unittest import override_config
@@ -68,6 +65,7 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
# quarantined media) into both the local store and the remote cache, plus
# one additional local media that is marked as protected from quarantine.
media_repository = hs.get_media_repository()
test_media_content = b"example string"
def _create_media_and_set_attributes(
last_accessed_ms: Optional[int],
@@ -75,14 +73,12 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
is_protected: Optional[bool] = False,
) -> MXCUri:
# "Upload" some media to the local media store
# If the meda
random_content = bytes(random_string(24), "utf-8")
mxc_uri: MXCUri = self.get_success(
media_repository.create_content(
media_type="text/plain",
upload_name=None,
content=io.BytesIO(random_content),
content_length=len(random_content),
content=io.BytesIO(test_media_content),
content_length=len(test_media_content),
auth_user=UserID.from_string(test_user_id),
)
)
@@ -133,7 +129,6 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
time_now_ms=clock.time_msec(),
upload_name="testfile.txt",
filesystem_id="abcdefg12345",
sha256=random_string(24),
)
)

View File

@@ -42,7 +42,6 @@ from twisted.web.resource import Resource
from synapse.api.errors import Codes, HttpResponseException
from synapse.api.ratelimiting import Ratelimiter
from synapse.events import EventBase
from synapse.http.client import ByteWriteable
from synapse.http.types import QueryParams
from synapse.logging.context import make_deferred_yieldable
from synapse.media._base import FileInfo, ThumbnailInfo
@@ -60,7 +59,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import FakeChannel
from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG, SMALL_PNG_SHA256
from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG
from tests.unittest import override_config
from tests.utils import default_config
@@ -1258,107 +1257,3 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
)
assert channel.code == 502
assert channel.json_body["errcode"] == "M_TOO_LARGE"
def read_body(
response: IResponse, stream: ByteWriteable, max_size: Optional[int]
) -> Deferred:
d: Deferred = defer.Deferred()
stream.write(SMALL_PNG)
d.callback(len(SMALL_PNG))
return d
class MediaHashesTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
media.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.user = self.register_user("user", "pass")
self.tok = self.login("user", "pass")
self.store = hs.get_datastores().main
self.client = hs.get_federation_http_client()
def create_resource_dict(self) -> Dict[str, Resource]:
resources = super().create_resource_dict()
resources["/_matrix/media"] = self.hs.get_media_repository_resource()
return resources
def test_ensure_correct_sha256(self) -> None:
"""Check that the hash does not change"""
media = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200)
mxc = media.get("content_uri")
assert mxc
store_media = self.get_success(self.store.get_local_media(mxc[11:]))
assert store_media
self.assertEqual(
store_media.sha256,
SMALL_PNG_SHA256,
)
def test_ensure_multiple_correct_sha256(self) -> None:
"""Check that two media items have the same hash."""
media_a = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200)
mxc_a = media_a.get("content_uri")
assert mxc_a
store_media_a = self.get_success(self.store.get_local_media(mxc_a[11:]))
assert store_media_a
media_b = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200)
mxc_b = media_b.get("content_uri")
assert mxc_b
store_media_b = self.get_success(self.store.get_local_media(mxc_b[11:]))
assert store_media_b
self.assertNotEqual(
store_media_a.media_id,
store_media_b.media_id,
)
self.assertEqual(
store_media_a.sha256,
store_media_b.sha256,
)
@override_config(
{
"enable_authenticated_media": False,
}
)
# mock actually reading file body
@patch(
"synapse.http.matrixfederationclient.read_body_with_max_size",
read_body,
)
def test_ensure_correct_sha256_federated(self) -> None:
"""Check that federated media have the same hash."""
# Mock getting a file over federation
async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
resp = MagicMock(spec=IResponse)
resp.code = 200
resp.length = 500
resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
resp.phrase = b"OK"
return resp
self.client._send_request = _send_request # type: ignore
# first request should go through
channel = self.make_request(
"GET",
"/_matrix/media/v3/download/remote.org/abc",
shorthand=False,
access_token=self.tok,
)
assert channel.code == 200
store_media = self.get_success(
self.store.get_cached_remote_media("remote.org", "abc")
)
assert store_media
self.assertEqual(
store_media.sha256,
SMALL_PNG_SHA256,
)

View File

@@ -1167,81 +1167,3 @@ class HTTPPusherTests(HomeserverTestCase):
self.assertEqual(
self.push_attempts[0][2]["notification"]["counts"]["unread"], 1
)
def test_push_backoff(self) -> None:
"""
The HTTP pusher will back off correctly if it fails to contact the push gateway.
"""
# Register the user who gets notified
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
# Register the user who sends the message
other_user_id = self.register_user("otheruser", "pass")
other_access_token = self.login("otheruser", "pass")
# Register the pusher
user_tuple = self.get_success(
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
device_display_name="pushy push",
pushkey="a@example.com",
lang=None,
data={"url": "http://example.com/_matrix/push/v1/notify"},
)
)
# Create a room with the other user
room = self.helper.create_room_as(user_id, tok=access_token)
self.helper.join(room=room, user=other_user_id, tok=other_access_token)
# The other user sends some messages
self.helper.send(room, body="Message 1", tok=other_access_token)
# One push was attempted to be sent
self.assertEqual(len(self.push_attempts), 1)
self.assertEqual(
self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify"
)
self.assertEqual(
self.push_attempts[0][2]["notification"]["content"]["body"], "Message 1"
)
self.push_attempts[0][0].callback({})
self.pump()
# Send another message, this time it fails
self.helper.send(room, body="Message 2", tok=other_access_token)
self.assertEqual(len(self.push_attempts), 2)
self.push_attempts[1][0].errback(Exception("couldn't connect"))
self.pump()
# Sending yet another message doesn't trigger a push immediately
self.helper.send(room, body="Message 3", tok=other_access_token)
self.pump()
self.assertEqual(len(self.push_attempts), 2)
# .. but waiting for a bit will cause more pushes
self.reactor.advance(10)
self.assertEqual(len(self.push_attempts), 3)
self.assertEqual(
self.push_attempts[2][2]["notification"]["content"]["body"], "Message 2"
)
self.push_attempts[2][0].callback({})
self.pump()
self.assertEqual(len(self.push_attempts), 4)
self.assertEqual(
self.push_attempts[3][2]["notification"]["content"]["body"], "Message 3"
)
self.push_attempts[3][0].callback({})
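The test above checks the pusher's retry behaviour: after a failed delivery, further events do not trigger an immediate push, and delivery resumes only once a backoff interval has passed. A generic sketch of a retry-with-backoff loop in that spirit (illustrative only, not Synapse's HttpPusher; the delays and jitter are arbitrary):

import asyncio
import random

async def deliver_with_backoff(send, payload, *, base_delay=10.0, max_delay=600.0):
    """Keep trying to deliver a payload, sleeping longer after each failure."""
    delay = base_delay
    while True:
        try:
            await send(payload)
            return
        except Exception:
            # Could not reach the push gateway: wait before retrying, doubling
            # the delay (plus a little jitter) up to a ceiling.
            await asyncio.sleep(delay * (1 + 0.1 * random.random()))
            delay = min(delay * 2, max_delay)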

View File

@@ -20,7 +20,7 @@
#
import urllib.parse
from typing import Dict, cast
from typing import Dict
from parameterized import parameterized
@@ -32,7 +32,6 @@ from synapse.http.server import JsonResource
from synapse.rest.admin import VersionServlet
from synapse.rest.client import login, media, room
from synapse.server import HomeServer
from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
@@ -228,25 +227,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Upload some media
response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok)
response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok)
response_3 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok)
# Extract media IDs
server_and_media_id_1 = response_1["content_uri"][6:]
server_and_media_id_2 = response_2["content_uri"][6:]
server_and_media_id_3 = response_3["content_uri"][6:]
# Remove the hash from the media to simulate historic media.
self.get_success(
self.hs.get_datastores().main.update_local_media(
media_id=server_and_media_id_3.split("/")[1],
media_type="image/png",
upload_name=None,
media_length=123,
user_id=UserID.from_string(non_admin_user),
# Hack to force some media to have no hash.
sha256=cast(str, None),
)
)
# Quarantine all media by this user
url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote(
@@ -260,13 +244,12 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
self.pump(1.0)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body, {"num_quarantined": 3}, "Expected 3 quarantined items"
channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items"
)
# Attempt to access each piece of media
self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
self._ensure_quarantined(admin_user_tok, server_and_media_id_3)
def test_cannot_quarantine_safe_media(self) -> None:
self.register_user("user_admin", "pass", admin=True)

View File

@@ -35,7 +35,7 @@ from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG
from tests.test_utils import SMALL_PNG
from tests.unittest import override_config
VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds
@@ -598,27 +598,23 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests):
class QuarantineMediaByIDTestCase(_AdminMediaTests):
def upload_media_and_return_media_id(self, data: bytes) -> str:
# Upload some media into the room
response = self.helper.upload_media(
data,
tok=self.admin_user_tok,
expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
return server_and_media_id.split("/")[1]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.server_name = hs.hostname
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.media_id = self.upload_media_and_return_media_id(SMALL_PNG)
self.media_id_2 = self.upload_media_and_return_media_id(SMALL_PNG)
self.media_id_3 = self.upload_media_and_return_media_id(SMALL_PNG)
self.media_id_other = self.upload_media_and_return_media_id(SMALL_CMYK_JPEG)
# Upload some media into the room
response = self.helper.upload_media(
SMALL_PNG,
tok=self.admin_user_tok,
expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
self.media_id = server_and_media_id.split("/")[1]
self.url = "/_synapse/admin/v1/media/%s/%s/%s"
@parameterized.expand(["quarantine", "unquarantine"])
@@ -690,52 +686,6 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
assert media_info is not None
self.assertFalse(media_info.quarantined_by)
def test_quarantine_media_match_hash(self) -> None:
"""
Tests that quarantining one media item also quarantines all media sharing the same hash
"""
media_info = self.get_success(self.store.get_local_media(self.media_id))
assert media_info is not None
self.assertFalse(media_info.quarantined_by)
# quarantining
channel = self.make_request(
"POST",
self.url % ("quarantine", self.server_name, self.media_id),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
# Test that ALL similar media was quarantined.
for media in [self.media_id, self.media_id_2, self.media_id_3]:
media_info = self.get_success(self.store.get_local_media(media))
assert media_info is not None
self.assertTrue(media_info.quarantined_by)
# Test that other media was not.
media_info = self.get_success(self.store.get_local_media(self.media_id_other))
assert media_info is not None
self.assertFalse(media_info.quarantined_by)
# remove from quarantine
channel = self.make_request(
"POST",
self.url % ("unquarantine", self.server_name, self.media_id),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
# Test that ALL similar media is now reset.
for media in [self.media_id, self.media_id_2, self.media_id_3]:
media_info = self.get_success(self.store.get_local_media(media))
assert media_info is not None
self.assertFalse(media_info.quarantined_by)
def test_quarantine_protected_media(self) -> None:
"""
Tests that quarantining from protected media fails

Some files were not shown because too many files have changed in this diff.