Compare commits
77 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 8a69993893 | |||
| f9eff0766d | |||
| d18edf67d6 | |||
| fd5d3d852d | |||
| ea376126a0 | |||
| 74be5cfdbc | |||
| f2ca2e31f7 | |||
| 6dc1ecd359 | |||
| 2965c9970c | |||
| d59bbd8b6b | |||
| 7be6c711d4 | |||
| 5ab05e7b95 | |||
| 7563b2a2a3 | |||
| 4097ada89f | |||
| f79811ed80 | |||
| 4eaab31757 | |||
| ad140130cc | |||
| e47de2b32d | |||
| 0384fd72ee | |||
| 75832f25b0 | |||
| 7346760aed | |||
| b0795d0cb6 | |||
| 2ef7824620 | |||
| 39e17856a3 | |||
| 4c958c679a | |||
| a87981f673 | |||
| 2ff977a6c3 | |||
| 1482ad1917 | |||
| 5b89c92643 | |||
| 33824495ba | |||
| 89cb613a4e | |||
| c16a981f22 | |||
| 0046d7278b | |||
| 2c7a61e311 | |||
| 45420b1d42 | |||
| 19b0e23c3d | |||
| a832375bfb | |||
| ae701e1709 | |||
| dd05cc55ee | |||
| 081f6ad50f | |||
| b30fcb03cc | |||
| 0e3c0aeee8 | |||
| 5c84f25809 | |||
| 770768614b | |||
| b8b3896b1d | |||
| 01efc49554 | |||
| fa53a8512a | |||
| fdbcb821ff | |||
| 8eb991b746 | |||
| 87d374c639 | |||
| 1709234311 | |||
| 80b62d7903 | |||
| 7ace290f07 | |||
| 2f812c2eb6 | |||
| 90f346183a | |||
| f638a76ba4 | |||
| cf02b8fea5 | |||
| 1deb6e03e0 | |||
| 02eed668b8 | |||
| 9f8ed14535 | |||
| 3bc04d05a4 | |||
| 4dba011c31 | |||
| 76ffd3ba01 | |||
| 3c188231c7 | |||
| d17295e5c3 | |||
| a39b856cf0 | |||
| 2830013e5e | |||
| ecc09b15f1 | |||
| 31110f35d9 | |||
| 2277df2a1e | |||
| 5e83434f3a | |||
| a227d20c25 | |||
| bd08a01fc8 | |||
| 92a29dcffc | |||
| 2719bd1794 | |||
| 7af299b365 | |||
| d8fef721a0 |
@@ -30,7 +30,7 @@ jobs:
|
||||
run: docker buildx inspect
|
||||
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
|
||||
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
|
||||
- name: 📥 Download artifact
|
||||
uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
|
||||
uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
|
||||
with:
|
||||
workflow: docs-pr.yaml
|
||||
run_id: ${{ github.event.workflow_run.id }}
|
||||
|
||||
@@ -24,7 +24,7 @@ jobs:
|
||||
mdbook-version: '0.4.17'
|
||||
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
cp book/welcome_and_overview.html book/index.html
|
||||
|
||||
- name: Upload Artifact
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: book
|
||||
path: book
|
||||
|
||||
@@ -64,7 +64,7 @@ jobs:
|
||||
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
|
||||
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
|
||||
@@ -44,6 +44,6 @@ jobs:
|
||||
- run: cargo fmt
|
||||
continue-on-error: true
|
||||
|
||||
- uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0
|
||||
- uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
|
||||
with:
|
||||
commit_message: "Attempt to fix linting"
|
||||
|
||||
@@ -86,7 +86,7 @@ jobs:
|
||||
-e POSTGRES_PASSWORD=postgres \
|
||||
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
|
||||
postgres:${{ matrix.postgres-version }}
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: pip install .[all,test]
|
||||
@@ -164,7 +164,7 @@ jobs:
|
||||
if: ${{ always() }}
|
||||
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
|
||||
- name: Upload SyTest logs
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
|
||||
|
||||
@@ -17,7 +17,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: '3.x'
|
||||
- run: pip install tomli
|
||||
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: '3.x'
|
||||
- id: set-distros
|
||||
@@ -66,7 +66,7 @@ jobs:
|
||||
install: true
|
||||
|
||||
- name: Set up docker layer caching
|
||||
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Set up python
|
||||
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: '3.x'
|
||||
|
||||
@@ -101,7 +101,7 @@ jobs:
|
||||
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Upload debs as artifacts
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
|
||||
path: debs/*
|
||||
@@ -132,7 +132,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
|
||||
# here, because `python` on osx points to Python 2.7.
|
||||
@@ -165,7 +165,7 @@ jobs:
|
||||
CARGO_NET_GIT_FETCH_WITH_CLI: true
|
||||
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
|
||||
|
||||
- uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
|
||||
path: ./wheelhouse/*.whl
|
||||
@@ -177,7 +177,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
@@ -186,7 +186,7 @@ jobs:
|
||||
- name: Build sdist
|
||||
run: python -m build --sdist
|
||||
|
||||
- uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: Sdist
|
||||
path: dist/*.tar.gz
|
||||
@@ -203,7 +203,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download all workflow run artifacts
|
||||
uses: actions/download-artifact@b14cf4c92620c250e1c074ab0a5800e37df86765 # v4.2.0
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
- name: Build a tarball for the debs
|
||||
# We need to merge all the debs uploads into one folder, then compress
|
||||
# that.
|
||||
@@ -213,7 +213,7 @@ jobs:
|
||||
tar -cvJf debs.tar.xz debs
|
||||
- name: Attach to release
|
||||
# Pinned to work around https://github.com/softprops/action-gh-release/issues/445
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
|
||||
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
|
||||
@@ -102,7 +102,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
|
||||
@@ -112,7 +112,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: .ci/scripts/check_lockfile.py
|
||||
@@ -167,7 +167,7 @@ jobs:
|
||||
# Cribbed from
|
||||
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
|
||||
- name: Restore/persist mypy's cache
|
||||
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: |
|
||||
.mypy_cache
|
||||
@@ -192,7 +192,7 @@ jobs:
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install 'towncrier>=18.6.0rc1'"
|
||||
@@ -279,7 +279,7 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install rstcheck"
|
||||
@@ -327,7 +327,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- id: get-matrix
|
||||
@@ -414,7 +414,7 @@ jobs:
|
||||
sudo apt-get -qq install build-essential libffi-dev python3-dev \
|
||||
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
|
||||
|
||||
- uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
|
||||
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
|
||||
with:
|
||||
python-version: '3.9'
|
||||
|
||||
@@ -529,7 +529,7 @@ jobs:
|
||||
if: ${{ always() }}
|
||||
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
|
||||
- name: Upload SyTest logs
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
|
||||
@@ -627,7 +627,7 @@ jobs:
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: postgres
|
||||
- name: "Upload schema differences"
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
|
||||
with:
|
||||
name: Schema dumps
|
||||
|
||||
@@ -11,7 +11,7 @@ jobs:
|
||||
if: >
|
||||
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
|
||||
steps:
|
||||
- uses: actions/add-to-project@f5473ace9aeee8b97717b281e26980aa5097023f # main (v1.0.2 + 10 commits)
|
||||
- uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
|
||||
id: add_project
|
||||
with:
|
||||
project-url: "https://github.com/orgs/matrix-org/projects/67"
|
||||
|
||||
@@ -138,7 +138,7 @@ jobs:
|
||||
if: ${{ always() }}
|
||||
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
|
||||
- name: Upload SyTest logs
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
|
||||
|
||||
+72
@@ -1,3 +1,75 @@
|
||||
# Synapse 1.128.0 (2025-04-08)
|
||||
|
||||
No significant changes since 1.128.0rc1.
|
||||
|
||||
|
||||
|
||||
|
||||
# Synapse 1.128.0rc1 (2025-04-01)
|
||||
|
||||
### Features
|
||||
|
||||
- Add an access token introspection cache to make Matrix Authentication Service integration ([MSC3861](https://github.com/matrix-org/matrix-doc/pull/3861)) more efficient. ([\#18231](https://github.com/element-hq/synapse/issues/18231))
|
||||
- Add background job to clear unreferenced state groups. ([\#18254](https://github.com/element-hq/synapse/issues/18254))
|
||||
- Hashes of media files are now tracked by Synapse. Media quarantines will now apply to all files with the same hash. ([\#18277](https://github.com/element-hq/synapse/issues/18277), [\#18302](https://github.com/element-hq/synapse/issues/18302), [\#18296](https://github.com/element-hq/synapse/issues/18296))
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- Add index to sliding sync ([MSC4186](https://github.com/matrix-org/matrix-doc/pull/4186)) membership snapshot table, to fix a performance issue. ([\#18074](https://github.com/element-hq/synapse/issues/18074))
|
||||
|
||||
### Updates to the Docker image
|
||||
|
||||
- Specify the architecture of installed packages via an APT config option, which is more reliable than appending package names with `:{arch}`. ([\#18271](https://github.com/element-hq/synapse/issues/18271))
|
||||
- Always specify base image debian versions with a build argument. ([\#18272](https://github.com/element-hq/synapse/issues/18272))
|
||||
- Allow passing arguments to `start_for_complement.sh` (to be sent to `configure_workers_and_start.py`). ([\#18273](https://github.com/element-hq/synapse/issues/18273))
|
||||
- Make some improvements to the `prefix-log` script in the workers image. ([\#18274](https://github.com/element-hq/synapse/issues/18274))
|
||||
- Use `uv pip` to install `supervisor` in the worker image. ([\#18275](https://github.com/element-hq/synapse/issues/18275))
|
||||
- Avoid needing to download & use `rsync` in a build layer. ([\#18287](https://github.com/element-hq/synapse/issues/18287))
|
||||
|
||||
### Improved Documentation
|
||||
|
||||
- Fix how to obtain access token and change naming from riot to element ([\#18225](https://github.com/element-hq/synapse/issues/18225))
|
||||
- Correct a small typo in the SSO mapping providers documentation. ([\#18276](https://github.com/element-hq/synapse/issues/18276))
|
||||
- Add docs for how to clear out the Poetry wheel cache. ([\#18283](https://github.com/element-hq/synapse/issues/18283))
|
||||
|
||||
### Internal Changes
|
||||
|
||||
- Add a column `participant` to `room_memberships` table. ([\#18068](https://github.com/element-hq/synapse/issues/18068))
|
||||
- Update Poetry to 2.1.1, including updating the lock file version. ([\#18251](https://github.com/element-hq/synapse/issues/18251))
|
||||
- Pin GitHub Actions dependencies by commit hash. ([\#18255](https://github.com/element-hq/synapse/issues/18255))
|
||||
- Add DB delta to remove the old state group deletion job. ([\#18284](https://github.com/element-hq/synapse/issues/18284))
|
||||
|
||||
|
||||
|
||||
### Updates to locked dependencies
|
||||
|
||||
* Bump actions/add-to-project from f5473ace9aeee8b97717b281e26980aa5097023f to 280af8ae1f83a494cfad2cb10f02f6d13529caa9. ([\#18303](https://github.com/element-hq/synapse/issues/18303))
|
||||
* Bump actions/cache from 4.2.2 to 4.2.3. ([\#18266](https://github.com/element-hq/synapse/issues/18266))
|
||||
* Bump actions/download-artifact from 4.2.0 to 4.2.1. ([\#18268](https://github.com/element-hq/synapse/issues/18268))
|
||||
* Bump actions/setup-python from 5.4.0 to 5.5.0. ([\#18298](https://github.com/element-hq/synapse/issues/18298))
|
||||
* Bump actions/upload-artifact from 4.6.1 to 4.6.2. ([\#18304](https://github.com/element-hq/synapse/issues/18304))
|
||||
* Bump authlib from 1.4.1 to 1.5.1. ([\#18306](https://github.com/element-hq/synapse/issues/18306))
|
||||
* Bump dawidd6/action-download-artifact from 8 to 9. ([\#18204](https://github.com/element-hq/synapse/issues/18204))
|
||||
* Bump jinja2 from 3.1.5 to 3.1.6. ([\#18223](https://github.com/element-hq/synapse/issues/18223))
|
||||
* Bump log from 0.4.26 to 0.4.27. ([\#18267](https://github.com/element-hq/synapse/issues/18267))
|
||||
* Bump phonenumbers from 8.13.50 to 9.0.2. ([\#18299](https://github.com/element-hq/synapse/issues/18299))
|
||||
* Bump pygithub from 2.5.0 to 2.6.1. ([\#18243](https://github.com/element-hq/synapse/issues/18243))
|
||||
* Bump pyo3-log from 0.12.1 to 0.12.2. ([\#18269](https://github.com/element-hq/synapse/issues/18269))
|
||||
|
||||
# Synapse 1.127.1 (2025-03-26)
|
||||
|
||||
## Security
|
||||
- Fix [CVE-2025-30355](https://www.cve.org/CVERecord?id=CVE-2025-30355) / [GHSA-v56r-hwv5-mxg6](https://github.com/element-hq/synapse/security/advisories/GHSA-v56r-hwv5-mxg6). **High severity vulnerability affecting federation. The vulnerability has been exploited in the wild.**
|
||||
|
||||
|
||||
|
||||
# Synapse 1.127.0 (2025-03-25)
|
||||
|
||||
No significant changes since 1.127.0rc1.
|
||||
|
||||
|
||||
|
||||
|
||||
# Synapse 1.127.0rc1 (2025-03-18)
|
||||
|
||||
### Features
|
||||
|
||||
Generated
+6
-6
@@ -13,9 +13,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.97"
|
||||
version = "1.0.98"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"
|
||||
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
@@ -223,9 +223,9 @@ checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.26"
|
||||
version = "0.4.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"
|
||||
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
@@ -316,9 +316,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-log"
|
||||
version = "0.12.1"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be5bb22b77965a7b5394e9aae9897a0607b51df5167561ffc3b02643b4200bc7"
|
||||
checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"log",
|
||||
|
||||
+7
-5
@@ -253,15 +253,17 @@ Alongside all that, join our developer community on Matrix:
|
||||
Copyright and Licensing
|
||||
=======================
|
||||
|
||||
Copyright 2014-2017 OpenMarket Ltd
|
||||
Copyright 2017 Vector Creations Ltd
|
||||
Copyright 2017-2025 New Vector Ltd
|
||||
| Copyright 2014-2017 OpenMarket Ltd
|
||||
| Copyright 2017 Vector Creations Ltd
|
||||
| Copyright 2017-2025 New Vector Ltd
|
||||
|
|
||||
|
||||
This software is dual-licensed by New Vector Ltd (Element). It can be used either:
|
||||
|
||||
|
||||
(1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR
|
||||
|
||||
|
||||
(2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to).
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses.
|
||||
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
Add a column `participant` to `room_memberships` table.
|
||||
@@ -1 +0,0 @@
|
||||
Add index to sliding sync membership snapshot table, to fix a performance issue.
|
||||
@@ -0,0 +1 @@
|
||||
Disable statement timeout during room purge.
|
||||
@@ -0,0 +1 @@
|
||||
Stop auto-provisionning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later.
|
||||
@@ -0,0 +1 @@
|
||||
Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
|
||||
@@ -0,0 +1 @@
|
||||
Improve formatting of the README file.
|
||||
@@ -0,0 +1 @@
|
||||
Add `passthrough_authorization_parameters` in OIDC configuration to allow to pass parameters to the authorization grant URL.
|
||||
@@ -0,0 +1 @@
|
||||
Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.
|
||||
@@ -1 +0,0 @@
|
||||
Update Poetry to 2.1.1, including updating the lock file version.
|
||||
@@ -1 +0,0 @@
|
||||
Add background job to clear unreferenced state groups.
|
||||
@@ -1 +0,0 @@
|
||||
Pin GitHub Actions dependencies by commit hash.
|
||||
@@ -0,0 +1 @@
|
||||
In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.
|
||||
@@ -0,0 +1 @@
|
||||
Optimize the build of the workers image.
|
||||
@@ -0,0 +1 @@
|
||||
In start_for_complement.sh, replace some external program calls with shell builtins.
|
||||
@@ -0,0 +1 @@
|
||||
Optimize the build of the complement-synapse image.
|
||||
@@ -0,0 +1 @@
|
||||
When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.
|
||||
@@ -0,0 +1 @@
|
||||
Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.
|
||||
@@ -0,0 +1 @@
|
||||
Fix `force_tracing_for_users` config when using delegated auth.
|
||||
@@ -0,0 +1 @@
|
||||
Fix the token introspection cache logging access tokens when MAS integration is in use.
|
||||
@@ -0,0 +1 @@
|
||||
Add cache to storage functions used to auth requests when using delegated auth.
|
||||
@@ -0,0 +1 @@
|
||||
Stop caching introspection failures when delegating auth to MAS.
|
||||
@@ -0,0 +1 @@
|
||||
Fix `ExternalIDReuse` exception after migrating to MAS on workers with a high traffic.
|
||||
@@ -0,0 +1 @@
|
||||
Fix minor performance regression caused by tracking of room participation. Regressed in v1.128.0.
|
||||
@@ -0,0 +1 @@
|
||||
Add support for handling `GET /devices/` on workers.
|
||||
@@ -0,0 +1 @@
|
||||
Allow `/rooms/` admin API to be run on workers.
|
||||
@@ -0,0 +1 @@
|
||||
Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.
|
||||
@@ -0,0 +1 @@
|
||||
Minor performance improvements to the notifier.
|
||||
@@ -0,0 +1 @@
|
||||
Slight performance increase when using the ratelimiter.
|
||||
@@ -0,0 +1 @@
|
||||
Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
|
||||
@@ -0,0 +1 @@
|
||||
Add `/_matrix/federation/v1/version` to list of federation endpoints that can be handled by workers.
|
||||
@@ -0,0 +1 @@
|
||||
Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks.
|
||||
@@ -0,0 +1 @@
|
||||
Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC Access Token.
|
||||
@@ -0,0 +1 @@
|
||||
Migrate from deprecated `poetry.dev-dependencies` -> `poetry.group.dev.dependencies` in pyproject.toml.
|
||||
Vendored
+21
-2
@@ -1,8 +1,27 @@
|
||||
matrix-synapse-py3 (1.127.0~rc1+nmu1) UNRELEASED; urgency=medium
|
||||
matrix-synapse-py3 (1.128.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.128.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Apr 2025 14:09:54 +0100
|
||||
|
||||
matrix-synapse-py3 (1.128.0~rc1) stable; urgency=medium
|
||||
|
||||
* Update Poetry to 2.1.1.
|
||||
* New synapse release 1.128.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 19 Mar 2025 17:38:49 +0000
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Apr 2025 14:35:33 +0000
|
||||
|
||||
matrix-synapse-py3 (1.127.1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.127.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Mar 2025 21:07:31 +0000
|
||||
|
||||
matrix-synapse-py3 (1.127.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.127.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Mar 2025 12:04:15 +0000
|
||||
|
||||
matrix-synapse-py3 (1.127.0~rc1) stable; urgency=medium
|
||||
|
||||
|
||||
+9
-9
@@ -134,7 +134,6 @@ RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && \
|
||||
apt-get install -y --no-install-recommends rsync && \
|
||||
apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
|
||||
curl \
|
||||
gosu \
|
||||
@@ -148,14 +147,10 @@ RUN \
|
||||
for arch in arm64 amd64; do \
|
||||
mkdir -p /tmp/debs-${arch} && \
|
||||
cd /tmp/debs-${arch} && \
|
||||
apt-get download $(sed "s/$/:${arch}/" /tmp/pkg-list); \
|
||||
apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \
|
||||
done
|
||||
|
||||
# Extract the debs for each architecture
|
||||
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
|
||||
# libraries to the right place, else the `COPY` won't work.
|
||||
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
|
||||
# already present in the runtime image.
|
||||
RUN \
|
||||
for arch in arm64 amd64; do \
|
||||
mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \
|
||||
@@ -165,8 +160,6 @@ RUN \
|
||||
dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \
|
||||
dpkg --extract $deb /install-${arch}; \
|
||||
done; \
|
||||
rsync -avr /install-${arch}/lib/ /install-${arch}/usr/lib; \
|
||||
rm -rf /install-${arch}/lib /install-${arch}/lib64; \
|
||||
done
|
||||
|
||||
|
||||
@@ -183,7 +176,14 @@ LABEL org.opencontainers.image.documentation='https://github.com/element-hq/syna
|
||||
LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
|
||||
LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
|
||||
|
||||
COPY --from=runtime-deps /install-${TARGETARCH} /
|
||||
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
|
||||
# libraries to the right place, else the `COPY` won't work.
|
||||
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
|
||||
# already present in the runtime image.
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/var /var
|
||||
COPY --from=builder /install /usr/local
|
||||
COPY ./docker/start.py /start.py
|
||||
COPY ./docker/conf /conf
|
||||
|
||||
+30
-20
@@ -2,18 +2,38 @@
|
||||
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
ARG PYTHON_VERSION=3.12
|
||||
|
||||
# first of all, we create a base image with an nginx which we can copy into the
|
||||
# first of all, we create a base image with dependencies which we can copy into the
|
||||
# target image. For repeated rebuilds, this is much faster than apt installing
|
||||
# each time.
|
||||
|
||||
FROM docker.io/library/debian:bookworm-slim AS deps_base
|
||||
FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
|
||||
|
||||
# Tell apt to keep downloaded package files, as we're using cache mounts.
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
|
||||
redis-server nginx-light
|
||||
nginx-light
|
||||
|
||||
RUN \
|
||||
# remove default page
|
||||
rm /etc/nginx/sites-enabled/default && \
|
||||
# have nginx log to stderr/out
|
||||
ln -sf /dev/stdout /var/log/nginx/access.log && \
|
||||
ln -sf /dev/stderr /var/log/nginx/error.log
|
||||
|
||||
# --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
|
||||
# (mounted as --mount=type=cache) and the target directory.
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
|
||||
|
||||
RUN mkdir -p /uv/etc/supervisor/conf.d
|
||||
|
||||
# Similarly, a base to copy the redis server from.
|
||||
#
|
||||
@@ -21,31 +41,21 @@ FROM docker.io/library/debian:bookworm-slim AS deps_base
|
||||
# which makes it much easier to copy (but we need to make sure we use an image
|
||||
# based on the same debian version as the synapse image, to make sure we get
|
||||
# the expected version of libc.
|
||||
FROM docker.io/library/redis:7-bookworm AS redis_base
|
||||
FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
|
||||
|
||||
# now build the final image, based on the the regular Synapse docker image
|
||||
FROM $FROM
|
||||
|
||||
# Install supervisord with pip instead of apt, to avoid installing a second
|
||||
# copy of python.
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install supervisor~=4.2
|
||||
RUN mkdir -p /etc/supervisor/conf.d
|
||||
|
||||
# Copy over redis and nginx
|
||||
# Copy over dependencies
|
||||
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
|
||||
|
||||
COPY --from=deps_base /uv /
|
||||
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
|
||||
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
|
||||
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
|
||||
COPY --from=deps_base /etc/nginx /etc/nginx
|
||||
RUN rm /etc/nginx/sites-enabled/default
|
||||
RUN mkdir /var/log/nginx /var/lib/nginx
|
||||
RUN chown www-data /var/lib/nginx
|
||||
|
||||
# have nginx log to stderr/out
|
||||
RUN ln -sf /dev/stdout /var/log/nginx/access.log
|
||||
RUN ln -sf /dev/stderr /var/log/nginx/error.log
|
||||
COPY --from=deps_base /var/log/nginx /var/log/nginx
|
||||
# chown to allow non-root user to write to http-*-temp-path dirs
|
||||
COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
|
||||
|
||||
# Copy Synapse worker, nginx and supervisord configuration template files
|
||||
COPY ./docker/conf-workers/* /conf/
|
||||
@@ -64,4 +74,4 @@ FROM $FROM
|
||||
# Replace the healthcheck with one which checks *all* the workers. The script
|
||||
# is generated by configure_workers_and_start.py.
|
||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
||||
CMD /bin/sh /healthcheck.sh
|
||||
CMD ["/healthcheck.sh"]
|
||||
|
||||
@@ -9,6 +9,9 @@
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
# This is an intermediate image, to be built locally (not pulled from a registry).
|
||||
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
|
||||
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
|
||||
|
||||
FROM $FROM
|
||||
# First of all, we copy postgres server from the official postgres image,
|
||||
@@ -20,9 +23,9 @@ FROM $FROM
|
||||
# the same debian version as Synapse's docker image (so the versions of the
|
||||
# shared libraries match).
|
||||
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
|
||||
COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
|
||||
COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
|
||||
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
|
||||
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
|
||||
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
|
||||
COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
|
||||
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
|
||||
ENV PGDATA=/var/lib/postgresql/data
|
||||
|
||||
@@ -55,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
|
||||
|
||||
# Update the healthcheck to have a shorter check interval
|
||||
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
|
||||
CMD /bin/sh /healthcheck.sh
|
||||
CMD ["/healthcheck.sh"]
|
||||
|
||||
@@ -5,12 +5,12 @@
|
||||
set -e
|
||||
|
||||
echo "Complement Synapse launcher"
|
||||
echo " Args: $@"
|
||||
echo " Args: $*"
|
||||
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
|
||||
|
||||
function log {
|
||||
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
|
||||
echo "$d $@"
|
||||
d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
|
||||
echo "$d $*"
|
||||
}
|
||||
|
||||
# Set the server name of the homeserver
|
||||
@@ -103,12 +103,11 @@ fi
|
||||
# Note that both the key and certificate are in PEM format (not DER).
|
||||
|
||||
# First generate a configuration file to set up a Subject Alternative Name.
|
||||
cat > /conf/server.tls.conf <<EOF
|
||||
echo "\
|
||||
.include /etc/ssl/openssl.cnf
|
||||
|
||||
[SAN]
|
||||
subjectAltName=DNS:${SERVER_NAME}
|
||||
EOF
|
||||
subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
|
||||
|
||||
# Generate an RSA key
|
||||
openssl genrsa -out /conf/server.tls.key 2048
|
||||
@@ -123,12 +122,12 @@ openssl x509 -req -in /conf/server.tls.csr \
|
||||
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
|
||||
|
||||
# Assert that we have a Subject Alternative Name in the certificate.
|
||||
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
|
||||
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
|
||||
# (the test will exit with 1 here if there isn't a SAN in the certificate.)
|
||||
[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
|
||||
|
||||
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
|
||||
export SYNAPSE_TLS_KEY=/conf/server.tls.key
|
||||
|
||||
# Run the script that writes the necessary config files and starts supervisord, which in turn
|
||||
# starts everything else
|
||||
exec /configure_workers_and_start.py
|
||||
exec /configure_workers_and_start.py "$@"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/local/bin/python
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
@@ -202,6 +202,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["federation"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/federation/v1/version$",
|
||||
"^/_matrix/federation/(v1|v2)/event/",
|
||||
"^/_matrix/federation/(v1|v2)/state/",
|
||||
"^/_matrix/federation/(v1|v2)/state_ids/",
|
||||
@@ -376,9 +377,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
#
|
||||
# We use append mode in case the files have already been written to by something else
|
||||
# (for instance, as part of the instructions in a dockerfile).
|
||||
exists = os.path.isfile(dst)
|
||||
with open(dst, "a") as outfile:
|
||||
# In case the existing file doesn't end with a newline
|
||||
outfile.write("\n")
|
||||
if exists:
|
||||
outfile.write("\n")
|
||||
|
||||
outfile.write(rendered)
|
||||
|
||||
@@ -604,7 +607,7 @@ def generate_base_homeserver_config() -> None:
|
||||
# start.py already does this for us, so just call that.
|
||||
# note that this script is copied in in the official, monolith dockerfile
|
||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
||||
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
|
||||
subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
|
||||
|
||||
|
||||
def parse_worker_types(
|
||||
@@ -998,6 +1001,7 @@ def generate_worker_files(
|
||||
"/healthcheck.sh",
|
||||
healthcheck_urls=healthcheck_urls,
|
||||
)
|
||||
os.chmod("/healthcheck.sh", 0o755)
|
||||
|
||||
# Ensure the logging directory exists
|
||||
log_dir = data_dir + "/logs"
|
||||
|
||||
+5
-2
@@ -10,6 +10,9 @@
|
||||
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
|
||||
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
|
||||
# confusion due to to interleaving of the different processes.
|
||||
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
|
||||
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
|
||||
prefixer() {
|
||||
mawk -W interactive '{printf("%s | %s\n", ENVIRON["SUPERVISOR_PROCESS_NAME"], $0); fflush() }'
|
||||
}
|
||||
exec 1> >(prefixer)
|
||||
exec 2> >(prefixer >&2)
|
||||
exec "$@"
|
||||
|
||||
@@ -46,6 +46,14 @@ to any local media, and any locally-cached copies of remote media.
|
||||
|
||||
The media file itself (and any thumbnails) is not deleted from the server.
|
||||
|
||||
Since Synapse 1.128.0, hashes of uploaded media are tracked. If this media
|
||||
is quarantined, Synapse will:
|
||||
|
||||
- Quarantine any media with a matching hash that has already been uploaded.
|
||||
- Quarantine any future media.
|
||||
- Quarantine any existing cached remote media.
|
||||
- Quarantine any future remote media.
|
||||
|
||||
## Quarantining media by ID
|
||||
|
||||
This API quarantines a single piece of local or remote media.
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
# Show scheduled tasks
|
||||
|
||||
This API returns information about scheduled tasks.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
|
||||
The api is:
|
||||
```
|
||||
GET /_synapse/admin/v1/scheduled_tasks
|
||||
```
|
||||
|
||||
It returns a JSON body like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"scheduled_tasks": [
|
||||
{
|
||||
"id": "GSA124oegf1",
|
||||
"action": "shutdown_room",
|
||||
"status": "complete",
|
||||
"timestamp_ms": 23423523,
|
||||
"resource_id": "!roomid",
|
||||
"result": "some result",
|
||||
"error": null
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Query parameters:**
|
||||
|
||||
* `action_name`: string - Is optional. Returns only the scheduled tasks with the given action name.
|
||||
* `resource_id`: string - Is optional. Returns only the scheduled tasks with the given resource id.
|
||||
* `status`: string - Is optional. Returns only the scheduled tasks matching the given status, one of
|
||||
- "scheduled" - Task is scheduled but not active
|
||||
- "active" - Task is active and probably running, and if not will be run on next scheduler loop run
|
||||
- "complete" - Task has completed successfully
|
||||
- "failed" - Task is over and either returned a failed status, or had an exception
|
||||
|
||||
* `max_timestamp`: int - Is optional. Returns only the scheduled tasks with a timestamp inferior to the specified one.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body along with a `200` HTTP status code:
|
||||
|
||||
* `id`: string - ID of scheduled task.
|
||||
* `action`: string - The name of the scheduled task's action.
|
||||
* `status`: string - The status of the scheduled task.
|
||||
* `timestamp_ms`: integer - The timestamp (in milliseconds since the unix epoch) of the given task - If the status is "scheduled" then this represents when it should be launched.
|
||||
Otherwise it represents the last time this task got a change of state.
|
||||
* `resource_id`: Optional string - The resource id of the scheduled task, if it possesses one
|
||||
* `result`: Optional Json - Any result of the scheduled task, if given
|
||||
* `error`: Optional string - If the task has the status "failed", the error associated with this failure
|
||||
@@ -150,6 +150,28 @@ $ poetry shell
|
||||
$ poetry install --extras all
|
||||
```
|
||||
|
||||
If you want to go even further and remove the Poetry caches:
|
||||
|
||||
```shell
|
||||
# Find your Poetry cache directory
|
||||
# Docs: https://github.com/python-poetry/poetry/blob/main/docs/configuration.md#cache-directory
|
||||
$ poetry config cache-dir
|
||||
|
||||
# Remove packages from all cached repositories
|
||||
$ poetry cache clear --all .
|
||||
|
||||
# Go completely nuclear and clear out everything Poetry cache related
|
||||
# including the wheel artifacts which is not covered by the above command
|
||||
# (see https://github.com/python-poetry/poetry/issues/10304)
|
||||
#
|
||||
# This is necessary in order to rebuild or fetch new wheels. For example, if you update
|
||||
# the `icu` library in on your system, you will need to rebuild the PyICU Python package
|
||||
# in order to incorporate the correct dynamically linked library locations otherwise you
|
||||
# will run into errors like: `ImportError: libicui18n.so.75: cannot open shared object file: No such file or directory`
|
||||
$ rm -rf $(poetry config cache-dir)
|
||||
```
|
||||
|
||||
|
||||
## ...run a command in the `poetry` virtualenv?
|
||||
|
||||
Use `poetry run cmd args` when you need the python virtualenv context.
|
||||
|
||||
@@ -23,6 +23,7 @@ such as [Github][github-idp].
|
||||
[auth0]: https://auth0.com/
|
||||
[authentik]: https://goauthentik.io/
|
||||
[lemonldap]: https://lemonldap-ng.org/
|
||||
[pocket-id]: https://pocket-id.org/
|
||||
[okta]: https://www.okta.com/
|
||||
[dex-idp]: https://github.com/dexidp/dex
|
||||
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
|
||||
@@ -624,6 +625,32 @@ oidc_providers:
|
||||
|
||||
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
|
||||
|
||||
### Pocket ID
|
||||
|
||||
[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys.
|
||||
1. Go to `OIDC Clients`
|
||||
2. Click on `Add OIDC Client`
|
||||
3. Add a name, for example `Synapse`
|
||||
4. Add `"https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain
|
||||
5. Click on `Save`
|
||||
6. Note down your `Client ID` and `Client secret`, these will be used later
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: pocket_id
|
||||
idp_name: Pocket ID
|
||||
issuer: "https://auth.example.org/" # Replace with your domain
|
||||
client_id: "your-client-id" # Replace with the "Client ID" you noted down before
|
||||
client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### Shibboleth with OIDC Plugin
|
||||
|
||||
[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.
|
||||
|
||||
@@ -10,7 +10,7 @@ As an example, a SSO service may return the email address
|
||||
to turn that into a displayname when creating a Matrix user for this individual.
|
||||
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
|
||||
variations. As each Synapse configuration may want something different, this is
|
||||
where SAML mapping providers come into play.
|
||||
where SSO mapping providers come into play.
|
||||
|
||||
SSO mapping providers are currently supported for OpenID and SAML SSO
|
||||
configurations. Please see the details below for how to implement your own.
|
||||
|
||||
@@ -117,6 +117,16 @@ each upgrade are complete before moving on to the next upgrade, to avoid
|
||||
stacking them up. You can monitor the currently running background updates with
|
||||
[the Admin API](usage/administration/admin_api/background_updates.html#status).
|
||||
|
||||
# Upgrading to v1.130.0
|
||||
|
||||
## Documented endpoint which can be delegated to a federation worker
|
||||
|
||||
The endpoint `^/_matrix/federation/v1/version$` can be delegated to a federation
|
||||
worker. This is not new behaviour, but had not been documented yet. The
|
||||
[list of delegatable endpoints](workers.md#synapseappgeneric_worker) has
|
||||
been updated to include it. Make sure to check your reverse proxy rules if you
|
||||
are using workers.
|
||||
|
||||
# Upgrading to v1.126.0
|
||||
|
||||
## Room list publication rules change
|
||||
|
||||
@@ -160,7 +160,7 @@ Using the following curl command:
|
||||
```console
|
||||
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
|
||||
```
|
||||
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
|
||||
`<access-token>` - can be obtained in element by looking in All settings, clicking Help & About and down the bottom is:
|
||||
Access Token:\<click to reveal\>
|
||||
|
||||
`<room-alias>` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org
|
||||
|
||||
@@ -3672,6 +3672,9 @@ Options for each entry include:
|
||||
* `additional_authorization_parameters`: String to string dictionary that will be passed as
|
||||
additional parameters to the authorization grant URL.
|
||||
|
||||
* `passthrough_authorization_parameters`: List of parameters that will be passed through from the redirect endpoint
|
||||
to the authorization grant URL.
|
||||
|
||||
* `allow_existing_users`: set to true to allow a user logging in via OIDC to
|
||||
match a pre-existing account instead of failing. This could be used if
|
||||
switching from password logins to OIDC. Defaults to false.
|
||||
@@ -3798,6 +3801,7 @@ oidc_providers:
|
||||
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
|
||||
additional_authorization_parameters:
|
||||
acr_values: 2fa
|
||||
passthrough_authorization_parameters: ["login_hint"]
|
||||
skip_verification: true
|
||||
enable_registration: true
|
||||
user_mapping_provider:
|
||||
@@ -4014,7 +4018,7 @@ This option has a number of sub-options. They are as follows:
|
||||
* `include_content`: Clients requesting push notifications can either have the body of
|
||||
the message sent in the notification poke along with other details
|
||||
like the sender, or just the event ID and room ID (`event_id_only`).
|
||||
If clients choose the to have the body sent, this option controls whether the
|
||||
If clients choose to have the body sent, this option controls whether the
|
||||
notification request includes the content of the event (other details
|
||||
like the sender are still included). If `event_id_only` is enabled, it
|
||||
has no effect.
|
||||
|
||||
@@ -200,6 +200,7 @@ information.
|
||||
^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
|
||||
|
||||
# Federation requests
|
||||
^/_matrix/federation/v1/version$
|
||||
^/_matrix/federation/v1/event/
|
||||
^/_matrix/federation/v1/state/
|
||||
^/_matrix/federation/v1/state_ids/
|
||||
@@ -249,6 +250,7 @@ information.
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
|
||||
^/_matrix/client/(r0|v3|unstable)/capabilities$
|
||||
^/_matrix/client/(r0|v3|unstable)/notifications$
|
||||
^/_synapse/admin/v1/rooms/
|
||||
|
||||
# Encryption requests
|
||||
^/_matrix/client/(r0|v3|unstable)/keys/query$
|
||||
@@ -280,6 +282,7 @@ Additionally, the following REST endpoints can be handled for GET requests:
|
||||
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
|
||||
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/devices/
|
||||
|
||||
# Account data requests
|
||||
^/_matrix/client/(r0|v3|unstable)/.*/tags
|
||||
|
||||
Generated
+45
-44
@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
|
||||
|
||||
[[package]]
|
||||
name = "authlib"
|
||||
version = "1.4.1"
|
||||
version = "1.5.1"
|
||||
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
|
||||
optional = true
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
|
||||
files = [
|
||||
{file = "Authlib-1.4.1-py2.py3-none-any.whl", hash = "sha256:edc29c3f6a3e72cd9e9f45fff67fc663a2c364022eb0371c003f22d5405915c1"},
|
||||
{file = "authlib-1.4.1.tar.gz", hash = "sha256:30ead9ea4993cdbab821dc6e01e818362f92da290c04c7f6a1940f86507a790d"},
|
||||
{file = "authlib-1.5.1-py2.py3-none-any.whl", hash = "sha256:8408861cbd9b4ea2ff759b00b6f02fd7d81ac5a56d0b2b22c08606c6049aae11"},
|
||||
{file = "authlib-1.5.1.tar.gz", hash = "sha256:5cbc85ecb0667312c1cdc2f9095680bb735883b123fb509fde1e65b1c5df972e"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -451,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
|
||||
optional = true
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
|
||||
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
|
||||
@@ -494,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
|
||||
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
|
||||
@@ -544,7 +544,7 @@ description = "Python wrapper for hiredis"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"redis\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"redis\""
|
||||
files = [
|
||||
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
|
||||
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
|
||||
@@ -890,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
|
||||
]
|
||||
@@ -943,14 +943,14 @@ trio = ["async_generator ; python_version == \"3.6\"", "trio"]
|
||||
|
||||
[[package]]
|
||||
name = "jinja2"
|
||||
version = "3.1.5"
|
||||
version = "3.1.6"
|
||||
description = "A very fast and expressive template engine."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
|
||||
{file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
|
||||
{file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"},
|
||||
{file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1028,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
|
||||
files = [
|
||||
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
|
||||
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
|
||||
@@ -1044,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"url-preview\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"url-preview\""
|
||||
files = [
|
||||
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
|
||||
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
|
||||
@@ -1330,7 +1330,7 @@ description = "An LDAP3 auth provider for Synapse"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
|
||||
files = [
|
||||
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
|
||||
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
|
||||
@@ -1551,7 +1551,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
|
||||
]
|
||||
@@ -1588,14 +1588,14 @@ dev = ["jinja2"]
|
||||
|
||||
[[package]]
|
||||
name = "phonenumbers"
|
||||
version = "8.13.50"
|
||||
version = "9.0.2"
|
||||
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"},
|
||||
{file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"},
|
||||
{file = "phonenumbers-9.0.2-py2.py3-none-any.whl", hash = "sha256:dbcec6bdfdf3973f60b81dc0fcac3f7b1638f877ac42da4d7b46724ed413e2b9"},
|
||||
{file = "phonenumbers-9.0.2.tar.gz", hash = "sha256:f590ee2b729bdd9873ca2d52989466add14c9953b48805c0aeb408348d4d6224"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1709,7 +1709,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"postgres\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"postgres\""
|
||||
files = [
|
||||
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
|
||||
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
|
||||
@@ -1730,7 +1730,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
|
||||
files = [
|
||||
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
|
||||
]
|
||||
@@ -1746,7 +1746,7 @@ description = "A Simple library to enable psycopg2 compatability"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
|
||||
files = [
|
||||
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
|
||||
]
|
||||
@@ -1929,14 +1929,14 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pygithub"
|
||||
version = "2.5.0"
|
||||
version = "2.6.1"
|
||||
description = "Use the full Github API v3"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
|
||||
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
|
||||
{file = "PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3"},
|
||||
{file = "pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1969,7 +1969,7 @@ description = "Python extension wrapping the ICU C++ API"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"user-search\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"user-search\""
|
||||
files = [
|
||||
{file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
|
||||
]
|
||||
@@ -2018,7 +2018,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"cache-memory\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"cache-memory\""
|
||||
files = [
|
||||
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
|
||||
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
|
||||
@@ -2053,18 +2053,19 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pyopenssl"
|
||||
version = "24.3.0"
|
||||
version = "25.0.0"
|
||||
description = "Python wrapper module around the OpenSSL library"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
|
||||
{file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
|
||||
{file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
|
||||
{file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
cryptography = ">=41.0.5,<45"
|
||||
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
|
||||
|
||||
[package.extras]
|
||||
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
|
||||
@@ -2077,7 +2078,7 @@ description = "Python implementation of SAML Version 2 Standard"
|
||||
optional = true
|
||||
python-versions = ">=3.9,<4.0"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
|
||||
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
|
||||
@@ -2102,7 +2103,7 @@ description = "Extensions to the standard Python datetime module"
|
||||
optional = true
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
|
||||
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
|
||||
@@ -2130,7 +2131,7 @@ description = "World timezone definitions, modern and historical"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
|
||||
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
|
||||
@@ -2494,7 +2495,7 @@ description = "Python client for Sentry (https://sentry.io)"
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"sentry\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"sentry\""
|
||||
files = [
|
||||
{file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
|
||||
{file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
|
||||
@@ -2678,7 +2679,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
|
||||
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
|
||||
@@ -2694,7 +2695,7 @@ description = "Python bindings for the Apache Thrift RPC system"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
|
||||
]
|
||||
@@ -2756,7 +2757,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
|
||||
@@ -2890,7 +2891,7 @@ description = "non-blocking redis client for python"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"redis\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"redis\""
|
||||
files = [
|
||||
{file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
|
||||
{file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
|
||||
@@ -2956,14 +2957,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-jsonschema"
|
||||
version = "4.23.0.20240813"
|
||||
version = "4.23.0.20241208"
|
||||
description = "Typing stubs for jsonschema"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
|
||||
{file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
|
||||
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
|
||||
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3007,14 +3008,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-psycopg2"
|
||||
version = "2.9.21.20250121"
|
||||
version = "2.9.21.20250318"
|
||||
description = "Typing stubs for psycopg2"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"},
|
||||
{file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"},
|
||||
{file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"},
|
||||
{file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3218,7 +3219,7 @@ description = "An XML Schema validator and decoder"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
|
||||
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
import secrets
|
||||
import ssl
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from aiohttp import web
|
||||
from signedjson.key import decode_signing_key_base64
|
||||
from signedjson.types import SigningKey
|
||||
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||
from synapse.crypto.event_signing import compute_event_signature
|
||||
|
||||
routes = web.RouteTableDef()
|
||||
|
||||
JOIN_FLOW_PAGE = """
|
||||
<html>
|
||||
<body>
|
||||
<a href="/accept?redirect_url={redirect_url}" target="_self">Accept policy and join room</a>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
|
||||
SIGNING_KEY = decode_signing_key_base64(
|
||||
"ed25519", "p_afG2", "E+EmxfcqLYjlS20I5ZzjoYeN7oR9Qt/zitPGomU0hmA"
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PolicyServer:
|
||||
server_name: str
|
||||
signing_key: SigningKey
|
||||
base_url: str
|
||||
token_store: dict[str, str] = field(default_factory=dict)
|
||||
|
||||
|
||||
@routes.get("/")
|
||||
async def hello(request):
|
||||
return web.Response(text="Hello, world")
|
||||
|
||||
|
||||
@routes.post("/_matrix/federation/unstable/re.jki.join_policy/request_join")
|
||||
async def request_join(request: web.Request) -> web.Response:
|
||||
policy_server: PolicyServer = request.app["policy_server"]
|
||||
return web.json_response({"url": policy_server.base_url + "/join_flow"})
|
||||
|
||||
|
||||
@routes.post("/_matrix/federation/unstable/re.jki.join_policy/sign_join")
|
||||
async def sign_join(request: web.Request) -> web.Response:
|
||||
policy_server: PolicyServer = request.app["policy_server"]
|
||||
|
||||
json_body = await request.json()
|
||||
if json_body["token"] not in policy_server.token_store:
|
||||
return web.json_response({}, status=403)
|
||||
|
||||
room_version_id = json_body["room_version"]
|
||||
event_json = json_body["event"]
|
||||
|
||||
room_version = KNOWN_ROOM_VERSIONS[room_version_id]
|
||||
|
||||
signatures = compute_event_signature(
|
||||
room_version=room_version,
|
||||
event_dict=event_json,
|
||||
signature_name=policy_server.server_name,
|
||||
signing_key=policy_server.signing_key,
|
||||
)
|
||||
|
||||
return web.json_response({"signatures": signatures[policy_server.server_name]})
|
||||
|
||||
|
||||
@routes.get("/join_flow")
|
||||
async def join_flow(request: web.Request) -> web.Response:
|
||||
redirect_url = request.query["redirect_url"]
|
||||
return web.Response(
|
||||
text=JOIN_FLOW_PAGE.format(redirect_url=redirect_url), content_type="text/html"
|
||||
)
|
||||
|
||||
|
||||
@routes.get("/accept")
|
||||
async def accept(request: web.Request) -> web.Response:
|
||||
policy_server: PolicyServer = request.app["policy_server"]
|
||||
|
||||
redirect_url = request.query["redirect_url"]
|
||||
|
||||
token = secrets.token_hex(16)
|
||||
policy_server.token_store[token] = "user_id"
|
||||
|
||||
# TODO: Use less dodgy URL creation
|
||||
if "?" in redirect_url:
|
||||
redirect_url += f"&token={token}"
|
||||
else:
|
||||
redirect_url += f"?token={token}"
|
||||
|
||||
return web.Response(
|
||||
text="Done!",
|
||||
status=307,
|
||||
headers={"location": redirect_url},
|
||||
)
|
||||
|
||||
|
||||
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
|
||||
context.load_cert_chain(
|
||||
certfile="/home/erikj/git/synapse/demo/8080/localhost:8080.tls.crt",
|
||||
keyfile="/home/erikj/git/synapse/demo/8080/localhost:8080.tls.key",
|
||||
)
|
||||
|
||||
|
||||
app = web.Application()
|
||||
app["policy_server"] = PolicyServer(
|
||||
server_name="localhost:8865",
|
||||
signing_key=SIGNING_KEY,
|
||||
base_url="https://localhost:8865",
|
||||
)
|
||||
app.add_routes(routes)
|
||||
web.run_app(app, port=8865, ssl_context=context)
|
||||
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.127.0rc1"
version = "1.128.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -315,7 +315,7 @@ all = [
    # - systemd: this is a system-based requirement
]

[tool.poetry.dev-dependencies]
[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
@@ -19,6 +19,7 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
@@ -38,15 +39,16 @@ from synapse.api.errors import (
|
||||
HttpResponseException,
|
||||
InvalidClientTokenError,
|
||||
OAuthInsufficientScopeError,
|
||||
StoreError,
|
||||
SynapseError,
|
||||
UnrecognizedRequestError,
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
|
||||
from synapse.types import Requester, UserID, create_requester
|
||||
from synapse.util import json_decoder
|
||||
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
|
||||
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.rest.admin.experimental_features import ExperimentalFeature
|
||||
@@ -76,6 +78,61 @@ def scope_to_list(scope: str) -> List[str]:
    return scope.strip().split(" ")


@dataclass
class IntrospectionResult:
    _inner: IntrospectionToken

    # when we retrieved this token,
    # in milliseconds since the Unix epoch
    retrieved_at_ms: int

    def is_active(self, now_ms: int) -> bool:
        if not self._inner.get("active"):
            return False

        expires_in = self._inner.get("expires_in")
        if expires_in is None:
            return True
        if not isinstance(expires_in, int):
            raise InvalidClientTokenError("token `expires_in` is not an int")

        absolute_expiry_ms = expires_in * 1000 + self.retrieved_at_ms
        return now_ms < absolute_expiry_ms

    def get_scope_list(self) -> List[str]:
        value = self._inner.get("scope")
        if not isinstance(value, str):
            return []
        return scope_to_list(value)

    def get_sub(self) -> Optional[str]:
        value = self._inner.get("sub")
        if not isinstance(value, str):
            return None
        return value

    def get_username(self) -> Optional[str]:
        value = self._inner.get("username")
        if not isinstance(value, str):
            return None
        return value

    def get_name(self) -> Optional[str]:
        value = self._inner.get("name")
        if not isinstance(value, str):
            return None
        return value

    def get_device_id(self) -> Optional[str]:
        value = self._inner.get("device_id")
        if value is not None and not isinstance(value, str):
            raise AuthError(
                500,
                "Invalid device ID in introspection result",
            )
        return value


class PrivateKeyJWTWithKid(PrivateKeyJWT):  # type: ignore[misc]
    """An implementation of the private_key_jwt client auth method that includes a kid header.
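As a quick illustration of the expiry arithmetic in `is_active` above, here is a hedged sketch; the timestamps and the keyword construction of `IntrospectionToken` are made up for the example and are not part of the change itself:

```python
# Illustrative only: made-up values, reusing the classes defined above.
result = IntrospectionResult(
    IntrospectionToken(active=True, expires_in=300),  # token valid for 5 minutes
    retrieved_at_ms=1_000_000,
)

# Expiry is anchored to when the introspection response was retrieved:
# 1_000_000 + 300 * 1000 = 1_300_000 ms.
assert result.is_active(now_ms=1_200_000)        # still active
assert not result.is_active(now_ms=1_400_000)    # expired 100 seconds ago
```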
@@ -120,6 +177,34 @@ class MSC3861DelegatedAuth(BaseAuth):
        self._http_client = hs.get_proxied_http_client()
        self._hostname = hs.hostname
        self._admin_token: Callable[[], Optional[str]] = self._config.admin_token
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

        # # Token Introspection Cache
        # This remembers what users/devices are represented by which access tokens,
        # in order to reduce overall system load:
        # - on Synapse (as requests are relatively expensive)
        # - on the network
        # - on MAS
        #
        # Since there is no invalidation mechanism currently,
        # the entries expire after 2 minutes.
        # This does mean tokens can be treated as valid by Synapse
        # for longer than reality.
        #
        # Ideally, tokens should logically be invalidated in the following circumstances:
        # - If a session logout happens.
        #   In this case, MAS will delete the device within Synapse
        #   anyway and this is good enough as an invalidation.
        # - If the client refreshes their token in MAS.
        #   In this case, the device still exists and it's not the end of the world for
        #   the old access token to continue working for a short time.
        self._introspection_cache: ResponseCache[str] = ResponseCache(
            self._clock,
            "token_introspection",
            timeout_ms=120_000,
            # don't log because the keys are access tokens
            enable_logging=False,
        )

        self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata](
            self._load_metadata
@@ -193,7 +278,9 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
metadata = await self._issuer_metadata.get()
|
||||
return metadata.get("introspection_endpoint")
|
||||
|
||||
async def _introspect_token(self, token: str) -> IntrospectionToken:
|
||||
async def _introspect_token(
|
||||
self, token: str, cache_context: ResponseCacheContext[str]
|
||||
) -> IntrospectionResult:
|
||||
"""
|
||||
Send a token to the introspection endpoint and returns the introspection response
|
||||
|
||||
@@ -209,6 +296,8 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
Returns:
|
||||
The introspection response
|
||||
"""
|
||||
# By default, we shouldn't cache the result unless we know it's valid
|
||||
cache_context.should_cache = False
|
||||
introspection_endpoint = await self._introspection_endpoint()
|
||||
raw_headers: Dict[str, str] = {
|
||||
"Content-Type": "application/x-www-form-urlencoded",
|
||||
@@ -266,7 +355,11 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
"The introspection endpoint returned an invalid JSON response."
|
||||
)
|
||||
|
||||
return IntrospectionToken(**resp)
|
||||
# We had a valid response, so we can cache it
|
||||
cache_context.should_cache = True
|
||||
return IntrospectionResult(
|
||||
IntrospectionToken(**resp), retrieved_at_ms=self._clock.time_msec()
|
||||
)
|
||||
|
||||
async def is_server_admin(self, requester: Requester) -> bool:
|
||||
return "urn:synapse:admin:*" in requester.scope
|
||||
@@ -277,6 +370,55 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
"""Get a registered user's ID.
|
||||
|
||||
Args:
|
||||
request: An HTTP request with an access_token query parameter.
|
||||
allow_guest: If False, will raise an AuthError if the user making the
|
||||
request is a guest.
|
||||
allow_expired: If True, allow the request through even if the account
|
||||
is expired, or session token lifetime has ended. Note that
|
||||
/login will deliver access tokens regardless of expiration.
|
||||
|
||||
Returns:
|
||||
Resolves to the requester
|
||||
Raises:
|
||||
InvalidClientCredentialsError if no user by that token exists or the token
|
||||
is invalid.
|
||||
AuthError if access is denied for the user in the access token
|
||||
"""
|
||||
parent_span = active_span()
|
||||
with start_active_span("get_user_by_req"):
|
||||
requester = await self._wrapped_get_user_by_req(
|
||||
request, allow_guest, allow_expired, allow_locked
|
||||
)
|
||||
|
||||
if parent_span:
|
||||
if requester.authenticated_entity in self._force_tracing_for_users:
|
||||
# request tracing is enabled for this user, so we need to force it
|
||||
# tracing on for the parent span (which will be the servlet span).
|
||||
#
|
||||
# It's too late for the get_user_by_req span to inherit the setting,
|
||||
# so we also force it on for that.
|
||||
force_tracing()
|
||||
force_tracing(parent_span)
|
||||
parent_span.set_tag(
|
||||
"authenticated_entity", requester.authenticated_entity
|
||||
)
|
||||
parent_span.set_tag("user_id", requester.user.to_string())
|
||||
if requester.device_id is not None:
|
||||
parent_span.set_tag("device_id", requester.device_id)
|
||||
if requester.app_service is not None:
|
||||
parent_span.set_tag("appservice_id", requester.app_service.id)
|
||||
return requester
|
||||
|
||||
async def _wrapped_get_user_by_req(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
access_token = self.get_access_token_from_request(request)
|
||||
|
||||
@@ -344,7 +486,9 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
)
|
||||
|
||||
try:
|
||||
introspection_result = await self._introspect_token(token)
|
||||
introspection_result = await self._introspection_cache.wrap(
|
||||
token, self._introspect_token, token, cache_context=True
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to introspect token")
|
||||
raise SynapseError(503, "Unable to introspect the access token")
|
||||
@@ -353,11 +497,11 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
|
||||
# TODO: introspection verification should be more extensive, especially:
|
||||
# - verify the audience
|
||||
if not introspection_result.get("active"):
|
||||
if not introspection_result.is_active(self._clock.time_msec()):
|
||||
raise InvalidClientTokenError("Token is not active")
|
||||
|
||||
# Let's look at the scope
|
||||
scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
|
||||
scope: List[str] = introspection_result.get_scope_list()
|
||||
|
||||
# Determine type of user based on presence of particular scopes
|
||||
has_user_scope = SCOPE_MATRIX_API in scope
|
||||
@@ -367,7 +511,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
raise InvalidClientTokenError("No scope in token granting user rights")
|
||||
|
||||
# Match via the sub claim
|
||||
sub: Optional[str] = introspection_result.get("sub")
|
||||
sub = introspection_result.get_sub()
|
||||
if sub is None:
|
||||
raise InvalidClientTokenError(
|
||||
"Invalid sub claim in the introspection result"
|
||||
@@ -380,29 +524,20 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
# If we could not find a user via the external_id, it either does not exist,
|
||||
# or the external_id was never recorded
|
||||
|
||||
# TODO: claim mapping should be configurable
|
||||
username: Optional[str] = introspection_result.get("username")
|
||||
if username is None or not isinstance(username, str):
|
||||
username = introspection_result.get_username()
|
||||
if username is None:
|
||||
raise AuthError(
|
||||
500,
|
||||
"Invalid username claim in the introspection result",
|
||||
)
|
||||
user_id = UserID(username, self._hostname)
|
||||
|
||||
# First try to find a user from the username claim
|
||||
# Try to find a user from the username claim
|
||||
user_info = await self.store.get_user_by_id(user_id=user_id.to_string())
|
||||
if user_info is None:
|
||||
# If the user does not exist, we should create it on the fly
|
||||
# TODO: we could use SCIM to provision users ahead of time and listen
|
||||
# for SCIM SET events if those ever become standard:
|
||||
# https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
|
||||
|
||||
# TODO: claim mapping should be configurable
|
||||
# If present, use the name claim as the displayname
|
||||
name: Optional[str] = introspection_result.get("name")
|
||||
|
||||
await self.store.register_user(
|
||||
user_id=user_id.to_string(), create_profile_with_displayname=name
|
||||
raise AuthError(
|
||||
500,
|
||||
"User not found",
|
||||
)
|
||||
|
||||
# And record the sub as external_id
|
||||
@@ -414,15 +549,8 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
|
||||
# MAS 0.15+ will give us the device ID as an explicit value for compatibility sessions
|
||||
# If present, we get it from here; if not, we get it from the scope
|
||||
device_id = introspection_result.get("device_id")
|
||||
if device_id is not None:
|
||||
# We got the device ID explicitly, just sanity check that it's a string
|
||||
if not isinstance(device_id, str):
|
||||
raise AuthError(
|
||||
500,
|
||||
"Invalid device ID in introspection result",
|
||||
)
|
||||
else:
|
||||
device_id = introspection_result.get_device_id()
|
||||
if device_id is None:
|
||||
# Find device_ids in scope
|
||||
# We only allow a single device_id in the scope, so we find them all in the
|
||||
# scope list, and raise if there are more than one. The OIDC server should be
|
||||
@@ -449,17 +577,10 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
"Invalid device ID in introspection result",
|
||||
)
|
||||
|
||||
# Create the device on the fly if it does not exist
|
||||
try:
|
||||
await self.store.get_device(
|
||||
user_id=user_id.to_string(), device_id=device_id
|
||||
)
|
||||
except StoreError:
|
||||
await self.store.store_device(
|
||||
user_id=user_id.to_string(),
|
||||
device_id=device_id,
|
||||
initial_device_display_name="OIDC-native client",
|
||||
)
|
||||
# Make sure the device exists
|
||||
await self.store.get_device(
|
||||
user_id=user_id.to_string(), device_id=device_id
|
||||
)
|
||||
|
||||
# TODO: there are a few things missing in the requester here, which still need
|
||||
# to be figured out, like:
|
||||
|
||||
@@ -29,8 +29,13 @@ from typing import Final
# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
# Max/min size of ints in canonical JSON
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

# the "depth" field on events is limited to the same as what
# canonicaljson accepts
MAX_DEPTH = CANONICALJSON_MAX_INT

# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255
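For context on where the bound comes from: Matrix's canonical JSON requires integers to fit in the range an IEEE-754 double can represent exactly, which is what ties `MAX_DEPTH` to `(2**53) - 1`. A small check of that fact:

```python
CANONICALJSON_MAX_INT = (2**53) - 1

assert CANONICALJSON_MAX_INT == 9_007_199_254_740_991
# Everything up to 2**53 round-trips through a double exactly...
assert int(float(CANONICALJSON_MAX_INT)) == CANONICALJSON_MAX_INT
# ...but precision is lost just above the bound: 2**53 and 2**53 + 1
# collapse to the same double.
assert float(CANONICALJSON_MAX_INT + 1) == float(CANONICALJSON_MAX_INT + 2)
```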
@@ -20,8 +20,7 @@
|
||||
#
|
||||
#
|
||||
|
||||
from collections import OrderedDict
|
||||
from typing import Hashable, Optional, Tuple
|
||||
from typing import Dict, Hashable, Optional, Tuple
|
||||
|
||||
from synapse.api.errors import LimitExceededError
|
||||
from synapse.config.ratelimiting import RatelimitSettings
|
||||
@@ -80,12 +79,14 @@ class Ratelimiter:
|
||||
self.store = store
|
||||
self._limiter_name = cfg.key
|
||||
|
||||
# An ordered dictionary representing the token buckets tracked by this rate
|
||||
# A dictionary representing the token buckets tracked by this rate
|
||||
# limiter. Each entry maps a key of arbitrary type to a tuple representing:
|
||||
# * The number of tokens currently in the bucket,
|
||||
# * The time point when the bucket was last completely empty, and
|
||||
# * The rate_hz (leak rate) of this particular bucket.
|
||||
self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()
|
||||
self.actions: Dict[Hashable, Tuple[float, float, float]] = {}
|
||||
|
||||
self.clock.looping_call(self._prune_message_counts, 60 * 1000)
|
||||
|
||||
def _get_key(
|
||||
self, requester: Optional[Requester], key: Optional[Hashable]
|
||||
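To make the bookkeeping in that comment concrete, here is a simplified sketch of how a leaky-bucket tuple of `(action_count, time_start, rate_hz)` translates into "how many more actions are allowed". This is an illustration of the general idea, not the exact Synapse logic:

```python
import time

# actions[key] = (action_count, time_start, rate_hz), as described in the comment above.
actions: dict = {}


def remaining_burst(key, burst_count: float, now_s: float) -> float:
    """How many more actions this key could perform right now (illustrative only)."""
    action_count, time_start, rate_hz = actions.get(key, (0.0, now_s, 1.0))
    # The bucket drains at rate_hz actions per second since it was last empty,
    # so anything older than that has already "leaked" out.
    performed = max(0.0, action_count - (now_s - time_start) * rate_hz)
    return burst_count - performed


# Example with a hypothetical key: an untouched bucket has the full burst available.
print(remaining_burst("user:@alice:example.org", burst_count=5.0, now_s=time.time()))
```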
@@ -169,9 +170,6 @@ class Ratelimiter:
|
||||
rate_hz = rate_hz if rate_hz is not None else self.rate_hz
|
||||
burst_count = burst_count if burst_count is not None else self.burst_count
|
||||
|
||||
# Remove any expired entries
|
||||
self._prune_message_counts(time_now_s)
|
||||
|
||||
# Check if there is an existing count entry for this key
|
||||
action_count, time_start, _ = self._get_action_counts(key, time_now_s)
|
||||
|
||||
@@ -246,13 +244,12 @@ class Ratelimiter:
|
||||
action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
|
||||
self.actions[key] = (action_count + n_actions, time_start, rate_hz)
|
||||
|
||||
def _prune_message_counts(self, time_now_s: float) -> None:
|
||||
def _prune_message_counts(self) -> None:
|
||||
"""Remove message count entries that have not exceeded their defined
|
||||
rate_hz limit
|
||||
|
||||
Args:
|
||||
time_now_s: The current time
|
||||
"""
|
||||
time_now_s = self.clock.time()
|
||||
|
||||
# We create a copy of the key list here as the dictionary is modified during
|
||||
# the loop
|
||||
for key in list(self.actions.keys()):
|
||||
|
||||
@@ -52,7 +52,7 @@ from synapse.logging.context import LoggingContext
|
||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
|
||||
from synapse.rest.health import HealthResource
|
||||
from synapse.rest.key.v2 import KeyResource
|
||||
from synapse.rest.synapse.client import build_synapse_client_resource_tree
|
||||
@@ -190,6 +190,7 @@ class GenericWorkerServer(HomeServer):
|
||||
|
||||
resources.update(build_synapse_client_resource_tree(self))
|
||||
resources["/.well-known"] = well_known_resource(self)
|
||||
resources["/_synapse/admin"] = AdminRestResource(self)
|
||||
|
||||
elif name == "federation":
|
||||
resources[FEDERATION_PREFIX] = TransportLayerServer(self)
|
||||
|
||||
@@ -356,6 +356,9 @@ def _parse_oidc_config_dict(
|
||||
additional_authorization_parameters=oidc_config.get(
|
||||
"additional_authorization_parameters", {}
|
||||
),
|
||||
passthrough_authorization_parameters=oidc_config.get(
|
||||
"passthrough_authorization_parameters", []
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -501,3 +504,6 @@ class OidcProviderConfig:
|
||||
|
||||
# Additional parameters that will be passed to the authorization grant URL
|
||||
additional_authorization_parameters: Mapping[str, str]
|
||||
|
||||
# Allow query parameters to the redirect endpoint that will be passed to the authorization grant URL
|
||||
passthrough_authorization_parameters: Collection[str]
|
||||
|
||||
@@ -552,23 +552,11 @@ def _is_membership_change_allowed(
|
||||
|
||||
key = (EventTypes.JoinRules, "")
|
||||
join_rule_event = auth_events.get(key)
|
||||
join_policy_server: Optional[str] = None
|
||||
if join_rule_event:
|
||||
join_rule = join_rule_event.content.get("join_rule", JoinRules.INVITE)
|
||||
join_policy_server = join_rule_event.content.get("re.jki.join_policy_server")
|
||||
else:
|
||||
join_rule = JoinRules.INVITE
|
||||
|
||||
if (
|
||||
join_policy_server
|
||||
and membership == Membership.JOIN
|
||||
and not (caller_in_room or caller_invited)
|
||||
):
|
||||
logger.info("Checking sigs")
|
||||
if not event.signatures.get(join_policy_server):
|
||||
raise AuthError(403, "Not signed by join policy server")
|
||||
caller_invited = True
|
||||
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
target_level = get_user_power_level(target_user_id, auth_events)
|
||||
|
||||
|
||||
@@ -40,6 +40,8 @@ import attr
|
||||
from canonicaljson import encode_canonical_json
|
||||
|
||||
from synapse.api.constants import (
|
||||
CANONICALJSON_MAX_INT,
|
||||
CANONICALJSON_MIN_INT,
|
||||
MAX_PDU_SIZE,
|
||||
EventContentFields,
|
||||
EventTypes,
|
||||
@@ -61,9 +63,6 @@ SPLIT_FIELD_REGEX = re.compile(r"\\*\.")
|
||||
# Find escaped characters, e.g. those with a \ in front of them.
|
||||
ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)")
|
||||
|
||||
CANONICALJSON_MAX_INT = (2**53) - 1
|
||||
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
|
||||
|
||||
|
||||
# Module API callback that allows adding fields to the unsigned section of
|
||||
# events that are sent to clients.
|
||||
|
||||
@@ -86,9 +86,7 @@ class EventValidator:
|
||||
|
||||
# Depending on the room version, ensure the data is spec compliant JSON.
|
||||
if event.room_version.strict_canonicaljson:
|
||||
# Note that only the client controlled portion of the event is
|
||||
# checked, since we trust the portions of the event we created.
|
||||
validate_canonicaljson(event.content)
|
||||
validate_canonicaljson(event.get_pdu_json())
|
||||
|
||||
if event.type == EventTypes.Aliases:
|
||||
if "aliases" in event.content:
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Awaitable, Callable, Optional
|
||||
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Sequence
|
||||
|
||||
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
@@ -29,6 +29,7 @@ from synapse.crypto.event_signing import check_event_content_hash
|
||||
from synapse.crypto.keyring import Keyring
|
||||
from synapse.events import EventBase, make_event_from_dict
|
||||
from synapse.events.utils import prune_event, validate_canonicaljson
|
||||
from synapse.federation.units import filter_pdus_for_valid_depth
|
||||
from synapse.http.servlet import assert_params_in_dict
|
||||
from synapse.logging.opentracing import log_kv, trace
|
||||
from synapse.types import JsonDict, get_domain_from_id
|
||||
@@ -267,6 +268,15 @@ def _is_invite_via_3pid(event: EventBase) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def parse_events_from_pdu_json(
|
||||
pdus_json: Sequence[JsonDict], room_version: RoomVersion
|
||||
) -> List[EventBase]:
|
||||
return [
|
||||
event_from_pdu_json(pdu_json, room_version)
|
||||
for pdu_json in filter_pdus_for_valid_depth(pdus_json)
|
||||
]
|
||||
|
||||
|
||||
def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
|
||||
"""Construct an EventBase from an event json received over federation
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ from synapse.federation.federation_base import (
|
||||
FederationBase,
|
||||
InvalidEventSignatureError,
|
||||
event_from_pdu_json,
|
||||
parse_events_from_pdu_json,
|
||||
)
|
||||
from synapse.federation.transport.client import SendJoinResponse
|
||||
from synapse.http.client import is_unknown_endpoint
|
||||
@@ -349,7 +350,7 @@ class FederationClient(FederationBase):
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
|
||||
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
|
||||
pdus = parse_events_from_pdu_json(transaction_data_pdus, room_version)
|
||||
|
||||
# Check signatures and hash of pdus, removing any from the list that fail checks
|
||||
pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
|
||||
@@ -393,9 +394,7 @@ class FederationClient(FederationBase):
|
||||
transaction_data,
|
||||
)
|
||||
|
||||
pdu_list: List[EventBase] = [
|
||||
event_from_pdu_json(p, room_version) for p in transaction_data["pdus"]
|
||||
]
|
||||
pdu_list = parse_events_from_pdu_json(transaction_data["pdus"], room_version)
|
||||
|
||||
if pdu_list and pdu_list[0]:
|
||||
pdu = pdu_list[0]
|
||||
@@ -809,7 +808,7 @@ class FederationClient(FederationBase):
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
|
||||
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
|
||||
auth_chain = parse_events_from_pdu_json(res["auth_chain"], room_version)
|
||||
|
||||
signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
|
||||
destination, auth_chain, room_version=room_version
|
||||
@@ -1529,9 +1528,7 @@ class FederationClient(FederationBase):
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
|
||||
events = [
|
||||
event_from_pdu_json(e, room_version) for e in content.get("events", [])
|
||||
]
|
||||
events = parse_events_from_pdu_json(content.get("events", []), room_version)
|
||||
|
||||
signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
|
||||
destination, events, room_version=room_version
|
||||
@@ -1960,43 +1957,6 @@ class FederationClient(FederationBase):
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
async def join_policy_server_get_url(
|
||||
self, policy_server: str, room_id: str, room_version: RoomVersion, user_id: str
|
||||
) -> Optional[str]:
|
||||
result = await self.transport_layer.join_policy_server_get_url(
|
||||
policy_server=policy_server,
|
||||
room_id=room_id,
|
||||
room_version=room_version,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
url = result.get("url")
|
||||
if isinstance(url, str):
|
||||
return url
|
||||
return None
|
||||
|
||||
async def join_policy_server_sign_join(
|
||||
self,
|
||||
policy_server: str,
|
||||
room_id: str,
|
||||
user_id: str,
|
||||
token: str,
|
||||
room_version: RoomVersion,
|
||||
event: EventBase,
|
||||
) -> None:
|
||||
result = await self.transport_layer.join_policy_server_sign_join(
|
||||
policy_server=policy_server,
|
||||
room_id=room_id,
|
||||
user_id=user_id,
|
||||
token=token,
|
||||
room_version=room_version,
|
||||
event=event,
|
||||
)
|
||||
|
||||
signatures = result.get("signatures")
|
||||
if signatures:
|
||||
event.signatures[policy_server] = signatures
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True, auto_attribs=True)
|
||||
class TimestampToEventResponse:
|
||||
|
||||
@@ -66,7 +66,7 @@ from synapse.federation.federation_base import (
|
||||
event_from_pdu_json,
|
||||
)
|
||||
from synapse.federation.persistence import TransactionActions
|
||||
from synapse.federation.units import Edu, Transaction
|
||||
from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus
|
||||
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
|
||||
from synapse.http.servlet import assert_params_in_dict
|
||||
from synapse.logging.context import (
|
||||
@@ -469,7 +469,12 @@ class FederationServer(FederationBase):
|
||||
logger.info("Ignoring PDU: %s", e)
|
||||
continue
|
||||
|
||||
event = event_from_pdu_json(p, room_version)
|
||||
try:
|
||||
event = event_from_pdu_json(p, room_version)
|
||||
except SynapseError as e:
|
||||
logger.info("Ignoring PDU for failing to deserialize: %s", e)
|
||||
continue
|
||||
|
||||
pdus_by_room.setdefault(room_id, []).append(event)
|
||||
|
||||
if event.origin_server_ts > newest_pdu_ts:
|
||||
@@ -636,8 +641,8 @@ class FederationServer(FederationBase):
|
||||
)
|
||||
|
||||
return {
|
||||
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
||||
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
||||
"pdus": serialize_and_filter_pdus(pdus),
|
||||
"auth_chain": serialize_and_filter_pdus(auth_chain),
|
||||
}
|
||||
|
||||
async def on_pdu_request(
|
||||
@@ -761,8 +766,8 @@ class FederationServer(FederationBase):
|
||||
event_json = event.get_pdu_json(time_now)
|
||||
resp = {
|
||||
"event": event_json,
|
||||
"state": [p.get_pdu_json(time_now) for p in state_events],
|
||||
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
|
||||
"state": serialize_and_filter_pdus(state_events, time_now),
|
||||
"auth_chain": serialize_and_filter_pdus(auth_chain_events, time_now),
|
||||
"members_omitted": caller_supports_partial_state,
|
||||
}
|
||||
|
||||
@@ -1005,7 +1010,7 @@ class FederationServer(FederationBase):
|
||||
|
||||
time_now = self._clock.time_msec()
|
||||
auth_pdus = await self.handler.on_event_auth(event_id)
|
||||
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
|
||||
res = {"auth_chain": serialize_and_filter_pdus(auth_pdus, time_now)}
|
||||
return 200, res
|
||||
|
||||
async def on_query_client_keys(
|
||||
@@ -1090,7 +1095,7 @@ class FederationServer(FederationBase):
|
||||
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
|
||||
return {"events": serialize_and_filter_pdus(missing_events, time_now)}
|
||||
|
||||
async def on_openid_userinfo(self, token: str) -> Optional[str]:
|
||||
ts_now_ms = self._clock.time_msec()
|
||||
|
||||
@@ -894,46 +894,6 @@ class TransportLayerClient:
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
async def join_policy_server_get_url(
|
||||
self, policy_server: str, room_id: str, room_version: RoomVersion, user_id: str
|
||||
) -> JsonDict:
|
||||
path = _create_path(
|
||||
FEDERATION_UNSTABLE_PREFIX, "/re.jki.join_policy/request_join"
|
||||
)
|
||||
return await self.client.post_json(
|
||||
policy_server,
|
||||
path,
|
||||
data={
|
||||
"room_id": room_id,
|
||||
"room_version": room_version.identifier,
|
||||
"user_id": user_id,
|
||||
},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def join_policy_server_sign_join(
|
||||
self,
|
||||
policy_server: str,
|
||||
room_id: str,
|
||||
user_id: str,
|
||||
token: str,
|
||||
room_version: RoomVersion,
|
||||
event: EventBase,
|
||||
) -> JsonDict:
|
||||
path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/re.jki.join_policy/sign_join")
|
||||
return await self.client.post_json(
|
||||
policy_server,
|
||||
path,
|
||||
data={
|
||||
"room_id": room_id,
|
||||
"user_id": user_id,
|
||||
"token": token,
|
||||
"room_version": room_version.identifier,
|
||||
"event": event.get_pdu_json(),
|
||||
},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
|
||||
def _create_path(federation_prefix: str, path: str, *args: str) -> str:
|
||||
"""
|
||||
|
||||
@@ -24,10 +24,12 @@ server protocol.
"""

import logging
from typing import List, Optional
from typing import List, Optional, Sequence

import attr

from synapse.api.constants import CANONICALJSON_MAX_INT, CANONICALJSON_MIN_INT
from synapse.events import EventBase
from synapse.types import JsonDict

logger = logging.getLogger(__name__)
@@ -104,8 +106,28 @@ class Transaction:
        result = {
            "origin": self.origin,
            "origin_server_ts": self.origin_server_ts,
            "pdus": self.pdus,
            "pdus": filter_pdus_for_valid_depth(self.pdus),
        }
        if self.edus:
            result["edus"] = self.edus
        return result


def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]:
    filtered_pdus = []
    for pdu in pdus:
        # Drop PDUs that have a depth that is outside of the range allowed
        # by canonical json.
        if (
            "depth" in pdu
            and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
        ):
            filtered_pdus.append(pdu)

    return filtered_pdus


def serialize_and_filter_pdus(
    pdus: Sequence[EventBase], time_now: Optional[int] = None
) -> List[JsonDict]:
    return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus])
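A hedged example of what `filter_pdus_for_valid_depth` keeps and drops; the PDUs below are minimal made-up dicts rather than real events:

```python
pdus = [
    {"type": "m.room.message", "depth": 5},       # kept: depth is in range
    {"type": "m.room.message", "depth": 2**60},   # dropped: above CANONICALJSON_MAX_INT
    {"type": "m.room.message"},                   # dropped: no depth field at all
]

assert filter_pdus_for_valid_depth(pdus) == [{"type": "m.room.message", "depth": 5}]
```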
|
||||
@@ -163,6 +163,8 @@ class DeviceWorkerHandler:
|
||||
raise errors.NotFoundError()
|
||||
|
||||
ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
|
||||
|
||||
device = dict(device)
|
||||
_update_device_from_client_ips(device, ips)
|
||||
|
||||
set_tag("device", str(device))
|
||||
|
||||
@@ -467,6 +467,10 @@ class OidcProvider:
|
||||
|
||||
self._sso_handler.register_identity_provider(self)
|
||||
|
||||
self.passthrough_authorization_parameters = (
|
||||
provider.passthrough_authorization_parameters
|
||||
)
|
||||
|
||||
def _validate_metadata(self, m: OpenIDProviderMetadata) -> None:
|
||||
"""Verifies the provider metadata.
|
||||
|
||||
@@ -582,6 +586,24 @@ class OidcProvider:
|
||||
or self._user_profile_method == "userinfo_endpoint"
|
||||
)
|
||||
|
||||
@property
|
||||
def _uses_access_token(self) -> bool:
|
||||
"""Return True if the `access_token` will be used during the login process.
|
||||
|
||||
This is useful to determine whether the access token
|
||||
returned by the identity provider, and
|
||||
any related metadata (such as the `at_hash` field in
|
||||
the ID token), should be validated.
|
||||
"""
|
||||
# Currently, Synapse only uses the access_token to fetch user metadata
|
||||
# from the userinfo endpoint. Therefore we only have a single criteria
|
||||
# to check right now but this may change in the future and this function
|
||||
# should be updated if more usages are introduced.
|
||||
#
|
||||
# For example, if we start to use the access_token given to us by the
|
||||
# IdP for more things, such as accessing Resource Server APIs.
|
||||
return self._uses_userinfo
|
||||
|
||||
@property
|
||||
def issuer(self) -> str:
|
||||
"""The issuer identifying this provider."""
|
||||
@@ -953,9 +975,16 @@ class OidcProvider:
|
||||
"nonce": nonce,
|
||||
"client_id": self._client_auth.client_id,
|
||||
}
|
||||
if "access_token" in token:
|
||||
if self._uses_access_token and "access_token" in token:
|
||||
# If we got an `access_token`, there should be an `at_hash` claim
|
||||
# in the `id_token` that we can check against.
|
||||
# in the `id_token` that we can check against. Setting this
|
||||
# instructs authlib to check the value of `at_hash` in the
|
||||
# ID token.
|
||||
#
|
||||
# We only need to verify the access token if we actually make
|
||||
# use of it. Which currently only happens when we need to fetch
|
||||
# the user's information from the userinfo_endpoint. Thus, this
|
||||
# check is also gated on self._uses_userinfo.
|
||||
claims_params["access_token"] = token["access_token"]
|
||||
|
||||
claims_options = {"iss": {"values": [metadata["issuer"]]}}
|
||||
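For background on what passing `access_token` into `claims_params` triggers: per OpenID Connect Core, `at_hash` is the base64url-encoded left half of a hash of the access token, where the hash function matches the ID token's signing algorithm. A rough sketch of that calculation (illustrative; this is not how Synapse or authlib actually perform the check):

```python
import base64
import hashlib


def expected_at_hash(access_token: str) -> str:
    # RS256/ES256 ID tokens pair with SHA-256; other algorithms use SHA-384/512.
    digest = hashlib.sha256(access_token.encode("ascii")).digest()
    half = digest[: len(digest) // 2]
    return base64.urlsafe_b64encode(half).rstrip(b"=").decode("ascii")


# The verifier recomputes this and compares it with the `at_hash` claim in the ID token.
print(expected_at_hash("example-access-token"))
```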
@@ -1005,7 +1034,6 @@ class OidcProvider:
|
||||
when everything is done (or None for UI Auth)
|
||||
ui_auth_session_id: The session ID of the ongoing UI Auth (or
|
||||
None if this is a login).
|
||||
|
||||
Returns:
|
||||
The redirect URL to the authorization endpoint.
|
||||
|
||||
@@ -1078,6 +1106,13 @@ class OidcProvider:
|
||||
)
|
||||
)
|
||||
|
||||
# add passthrough additional authorization parameters
|
||||
passthrough_authorization_parameters = self.passthrough_authorization_parameters
|
||||
for parameter in passthrough_authorization_parameters:
|
||||
parameter_value = parse_string(request, parameter)
|
||||
if parameter_value:
|
||||
additional_authorization_parameters.update({parameter: parameter_value})
|
||||
|
||||
authorization_endpoint = metadata.get("authorization_endpoint")
|
||||
return prepare_grant_uri(
|
||||
authorization_endpoint,
|
||||
|
||||
@@ -398,7 +398,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
require_consent: bool = True,
|
||||
outlier: bool = False,
|
||||
origin_server_ts: Optional[int] = None,
|
||||
join_policy_token: Optional[str] = None,
|
||||
) -> Tuple[str, int]:
|
||||
"""
|
||||
Internal membership update function to get an existing event or create
|
||||
@@ -492,49 +491,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
)
|
||||
context = await unpersisted_context.persist(event)
|
||||
prev_state_ids = await context.get_prev_state_ids(
|
||||
StateFilter.from_types(
|
||||
[(EventTypes.Member, user_id), (EventTypes.JoinRules, "")]
|
||||
)
|
||||
StateFilter.from_types([(EventTypes.Member, user_id)])
|
||||
)
|
||||
|
||||
if membership == Membership.JOIN:
|
||||
join_rule_id = prev_state_ids.get((EventTypes.JoinRules, ""))
|
||||
if join_rule_id is not None:
|
||||
join_rule_event = await self.store.get_event(
|
||||
join_rule_id, allow_none=True
|
||||
)
|
||||
if join_rule_event:
|
||||
join_policy_server = join_rule_event.content.get(
|
||||
"re.jki.join_policy_server"
|
||||
)
|
||||
if isinstance(join_policy_server, str):
|
||||
if join_policy_token is None:
|
||||
policy_url = await self.federation_handler.federation_client.join_policy_server_get_url(
|
||||
policy_server=join_policy_server,
|
||||
room_id=room_id,
|
||||
room_version=event.room_version,
|
||||
user_id=target.to_string(),
|
||||
)
|
||||
|
||||
if policy_url is not None:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Cannot join room",
|
||||
errcode="RE_JKI_JOIN_POLICY_URL",
|
||||
additional_fields={
|
||||
"re.jki.join_policy_url": policy_url
|
||||
},
|
||||
)
|
||||
else:
|
||||
await self.federation_handler.federation_client.join_policy_server_sign_join(
|
||||
policy_server=join_policy_server,
|
||||
room_id=room_id,
|
||||
room_version=event.room_version,
|
||||
user_id=target.to_string(),
|
||||
token=join_policy_token,
|
||||
event=event,
|
||||
)
|
||||
|
||||
prev_member_event_id = prev_state_ids.get(
|
||||
(EventTypes.Member, user_id), None
|
||||
)
|
||||
@@ -625,7 +584,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
state_event_ids: Optional[List[str]] = None,
|
||||
depth: Optional[int] = None,
|
||||
origin_server_ts: Optional[int] = None,
|
||||
join_policy_token: Optional[str] = None,
|
||||
) -> Tuple[str, int]:
|
||||
"""Update a user's membership in a room.
|
||||
|
||||
@@ -723,7 +681,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
state_event_ids=state_event_ids,
|
||||
depth=depth,
|
||||
origin_server_ts=origin_server_ts,
|
||||
join_policy_token=join_policy_token,
|
||||
)
|
||||
|
||||
return result
|
||||
@@ -747,7 +704,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
state_event_ids: Optional[List[str]] = None,
|
||||
depth: Optional[int] = None,
|
||||
origin_server_ts: Optional[int] = None,
|
||||
join_policy_token: Optional[str] = None,
|
||||
) -> Tuple[str, int]:
|
||||
"""Helper for update_membership.
|
||||
|
||||
@@ -973,7 +929,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
require_consent=require_consent,
|
||||
outlier=outlier,
|
||||
origin_server_ts=origin_server_ts,
|
||||
join_policy_token=join_policy_token,
|
||||
)
|
||||
|
||||
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
|
||||
@@ -1233,7 +1188,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
require_consent=require_consent,
|
||||
outlier=outlier,
|
||||
origin_server_ts=origin_server_ts,
|
||||
join_policy_token=join_policy_token,
|
||||
)
|
||||
|
||||
async def check_for_any_membership_in_room(
|
||||
|
||||
@@ -59,7 +59,11 @@ from synapse.media._base import (
|
||||
respond_with_responder,
|
||||
)
|
||||
from synapse.media.filepath import MediaFilePaths
|
||||
from synapse.media.media_storage import MediaStorage
|
||||
from synapse.media.media_storage import (
|
||||
MediaStorage,
|
||||
SHA256TransparentIOReader,
|
||||
SHA256TransparentIOWriter,
|
||||
)
|
||||
from synapse.media.storage_provider import StorageProviderWrapper
|
||||
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
|
||||
from synapse.media.url_previewer import UrlPreviewer
|
||||
@@ -301,15 +305,26 @@ class MediaRepository:
|
||||
auth_user: The user_id of the uploader
|
||||
"""
|
||||
file_info = FileInfo(server_name=None, file_id=media_id)
|
||||
fname = await self.media_storage.store_file(content, file_info)
|
||||
sha256reader = SHA256TransparentIOReader(content)
|
||||
# This implements all of IO as it has a passthrough
|
||||
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
|
||||
sha256 = sha256reader.hexdigest()
|
||||
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
|
||||
logger.info("Stored local media in file %r", fname)
|
||||
|
||||
if should_quarantine:
|
||||
logger.warn(
|
||||
"Media has been automatically quarantined as it matched existing quarantined media"
|
||||
)
|
||||
|
||||
await self.store.update_local_media(
|
||||
media_id=media_id,
|
||||
media_type=media_type,
|
||||
upload_name=upload_name,
|
||||
media_length=content_length,
|
||||
user_id=auth_user,
|
||||
sha256=sha256,
|
||||
quarantined_by="system" if should_quarantine else None,
|
||||
)
|
||||
|
||||
try:
|
||||
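`SHA256TransparentIOReader` and `SHA256TransparentIOWriter` hash the media as it streams through, so the bytes only need to be read or written once. A minimal sketch of the idea, with assumed names and only the methods this hunk relies on (`wrap()` and `hexdigest()`); it is not the actual Synapse implementation:

```python
import hashlib
import io
from typing import IO


class TransparentSHA256Reader:
    """Illustrative wrapper: hashes everything read from the underlying stream."""

    def __init__(self, source: IO[bytes]) -> None:
        self._source = source
        self._hash = hashlib.sha256()

    def read(self, size: int = -1) -> bytes:
        data = self._source.read(size)
        self._hash.update(data)
        return data

    def wrap(self) -> "TransparentSHA256Reader":
        # The real helper proxies the full IO interface; read() is enough for this sketch.
        return self

    def hexdigest(self) -> str:
        return self._hash.hexdigest()


reader = TransparentSHA256Reader(io.BytesIO(b"hello media"))
while reader.read(4096):
    pass
print(reader.hexdigest())  # hash of everything that passed through
```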
@@ -342,11 +357,19 @@ class MediaRepository:
|
||||
media_id = random_string(24)
|
||||
|
||||
file_info = FileInfo(server_name=None, file_id=media_id)
|
||||
|
||||
fname = await self.media_storage.store_file(content, file_info)
|
||||
# The wrapper satisfies the full IO interface: anything it does not implement is passed through to the wrapped stream
|
||||
sha256reader = SHA256TransparentIOReader(content)
|
||||
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
|
||||
sha256 = sha256reader.hexdigest()
|
||||
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
|
||||
|
||||
logger.info("Stored local media in file %r", fname)
|
||||
|
||||
if should_quarantine:
|
||||
logger.warning(
|
||||
"Media has been automatically quarantined as it matched existing quarantined media"
|
||||
)
|
||||
|
||||
await self.store.store_local_media(
|
||||
media_id=media_id,
|
||||
media_type=media_type,
|
||||
@@ -354,6 +377,9 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=content_length,
|
||||
user_id=auth_user,
|
||||
sha256=sha256,
|
||||
# TODO: Better name?
|
||||
quarantined_by="system" if should_quarantine else None,
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -756,11 +782,13 @@ class MediaRepository:
|
||||
file_info = FileInfo(server_name=server_name, file_id=file_id)
|
||||
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
sha256writer = SHA256TransparentIOWriter(f)
|
||||
try:
|
||||
length, headers = await self.client.download_media(
|
||||
server_name,
|
||||
media_id,
|
||||
output_stream=f,
|
||||
# The wrapper satisfies the full BinaryIO interface: anything it does not implement is passed through to the wrapped stream
|
||||
output_stream=sha256writer.wrap(),
|
||||
max_size=self.max_upload_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
@@ -825,6 +853,7 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=length,
|
||||
filesystem_id=file_id,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
logger.info("Stored remote media in file %r", fname)
|
||||
@@ -845,6 +874,7 @@ class MediaRepository:
|
||||
last_access_ts=time_now_ms,
|
||||
quarantined_by=None,
|
||||
authenticated=authenticated,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
async def _federation_download_remote_file(
|
||||
@@ -879,11 +909,13 @@ class MediaRepository:
|
||||
file_info = FileInfo(server_name=server_name, file_id=file_id)
|
||||
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
sha256writer = SHA256TransparentIOWriter(f)
|
||||
try:
|
||||
res = await self.client.federation_download_media(
|
||||
server_name,
|
||||
media_id,
|
||||
output_stream=f,
|
||||
# The wrapper satisfies the full BinaryIO interface: anything it does not implement is passed through to the wrapped stream
|
||||
output_stream=sha256writer.wrap(),
|
||||
max_size=self.max_upload_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
@@ -954,6 +986,7 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=length,
|
||||
filesystem_id=file_id,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
logger.debug("Stored remote media in file %r", fname)
|
||||
@@ -974,6 +1007,7 @@ class MediaRepository:
|
||||
last_access_ts=time_now_ms,
|
||||
quarantined_by=None,
|
||||
authenticated=authenticated,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
def _get_thumbnail_requirements(
|
||||
|
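The repository changes above compute a SHA-256 digest of each upload and remote download while the bytes are being written out, then use that digest to decide whether the new file should be auto-quarantined. A minimal sketch of that flow, with a plain set of quarantined hashes standing in for `store.get_is_hash_quarantined` and an in-memory buffer standing in for the media store (both are stand-ins, not Synapse's real APIs):

```python
# Sketch only: hash while "storing", then auto-quarantine on a digest match.
import hashlib
import io


def store_upload(content: io.BufferedIOBase, quarantined_hashes: set) -> dict:
    hasher = hashlib.sha256()
    stored = bytearray()
    while chunk := content.read(64 * 1024):
        hasher.update(chunk)   # digest tracks exactly the bytes that get stored
        stored.extend(chunk)   # stand-in for writing to the media store
    sha256 = hasher.hexdigest()
    return {
        "media_length": len(stored),
        "sha256": sha256,
        "quarantined_by": "system" if sha256 in quarantined_hashes else None,
    }


if __name__ == "__main__":
    bad = hashlib.sha256(b"known bad file").hexdigest()
    print(store_upload(io.BytesIO(b"known bad file"), {bad}))  # quarantined_by='system'
    print(store_upload(io.BytesIO(b"harmless file"), {bad}))   # quarantined_by=None
```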
||||
@@ -19,6 +19,7 @@
|
||||
#
|
||||
#
|
||||
import contextlib
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
@@ -70,6 +71,88 @@ logger = logging.getLogger(__name__)
|
||||
CRLF = b"\r\n"
|
||||
|
||||
|
||||
class SHA256TransparentIOWriter:
|
||||
"""Will generate a SHA256 hash from a source stream transparently.
|
||||
|
||||
Args:
|
||||
source: Source stream.
|
||||
"""
|
||||
|
||||
def __init__(self, source: BinaryIO):
|
||||
self._hash = hashlib.sha256()
|
||||
self._source = source
|
||||
|
||||
def write(self, buffer: Union[bytes, bytearray]) -> int:
|
||||
"""Wrapper for source.write()
|
||||
|
||||
Args:
|
||||
buffer
|
||||
|
||||
Returns:
|
||||
the value of source.write()
|
||||
"""
|
||||
res = self._source.write(buffer)
|
||||
self._hash.update(buffer)
|
||||
return res
|
||||
|
||||
def hexdigest(self) -> str:
|
||||
"""The digest of the written or read value.
|
||||
|
||||
Returns:
|
||||
The digest in hex format.
|
||||
"""
|
||||
return self._hash.hexdigest()
|
||||
|
||||
def wrap(self) -> BinaryIO:
|
||||
# This class implements a subset of the IO interface and passes everything else through to the wrapped stream via __getattr__
|
||||
return cast(BinaryIO, self)
|
||||
|
||||
# Passthrough any other calls
|
||||
def __getattr__(self, attr_name: str) -> Any:
|
||||
return getattr(self._source, attr_name)
|
||||
|
||||
|
||||
class SHA256TransparentIOReader:
|
||||
"""Will generate a SHA256 hash from a source stream transparently.
|
||||
|
||||
Args:
|
||||
source: Source IO stream.
|
||||
"""
|
||||
|
||||
def __init__(self, source: IO):
|
||||
self._hash = hashlib.sha256()
|
||||
self._source = source
|
||||
|
||||
def read(self, n: int = -1) -> bytes:
|
||||
"""Wrapper for source.read()
|
||||
|
||||
Args:
|
||||
n
|
||||
|
||||
Returns:
|
||||
the value of source.read()
|
||||
"""
|
||||
bytes = self._source.read(n)
|
||||
self._hash.update(bytes)
|
||||
return bytes
|
||||
|
||||
def hexdigest(self) -> str:
|
||||
"""The digest of the written or read value.
|
||||
|
||||
Returns:
|
||||
The digest in hex format.
|
||||
"""
|
||||
return self._hash.hexdigest()
|
||||
|
||||
def wrap(self) -> IO:
|
||||
# This class implements a subset of the IO interface and passes everything else through to the wrapped stream via __getattr__
|
||||
return cast(IO, self)
|
||||
|
||||
# Passthrough any other calls
|
||||
def __getattr__(self, attr_name: str) -> Any:
|
||||
return getattr(self._source, attr_name)
|
||||
|
||||
|
||||
class MediaStorage:
|
||||
"""Responsible for storing/fetching files from local sources.
|
||||
|
||||
@@ -107,7 +190,6 @@ class MediaStorage:
|
||||
Returns:
|
||||
the file path written to in the primary media store
|
||||
"""
|
||||
|
||||
async with self.store_into_file(file_info) as (f, fname):
|
||||
# Write to the main media repository
|
||||
await self.write_to_file(source, f)
|
||||
|
||||
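The two classes above rely on a passthrough trick: they implement only the method they need to observe (`write` or `read`) plus `hexdigest`, and forward every other attribute lookup to the wrapped stream via `__getattr__`, so the wrapper can be handed to any code that expects a file-like object. A self-contained sketch of the same idea (illustrative names, not the Synapse classes):

```python
import hashlib
import io
from typing import IO, Any, cast


class TransparentSHA256Reader:
    """Hashes everything read through it; all other IO methods pass through."""

    def __init__(self, source: IO[bytes]) -> None:
        self._source = source
        self._hash = hashlib.sha256()

    def read(self, n: int = -1) -> bytes:
        data = self._source.read(n)
        self._hash.update(data)
        return data

    def hexdigest(self) -> str:
        return self._hash.hexdigest()

    def wrap(self) -> IO[bytes]:
        # Only a subset of IO is implemented here; the cast is justified by
        # __getattr__ forwarding everything else to the wrapped stream.
        return cast(IO[bytes], self)

    def __getattr__(self, attr_name: str) -> Any:
        return getattr(self._source, attr_name)


if __name__ == "__main__":
    reader = TransparentSHA256Reader(io.BytesIO(b"hello world"))
    wrapped = reader.wrap()
    print(wrapped.read())      # handled by the wrapper, feeds the hash
    print(wrapped.tell())      # not defined on the wrapper, forwarded to BytesIO
    print(reader.hexdigest())  # SHA-256 of b"hello world"
```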
@@ -66,7 +66,6 @@ from synapse.types import (
|
||||
from synapse.util.async_helpers import (
|
||||
timeout_deferred,
|
||||
)
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.stringutils import shortstr
|
||||
from synapse.visibility import filter_events_for_client
|
||||
|
||||
@@ -520,20 +519,22 @@ class Notifier:
|
||||
users = users or []
|
||||
rooms = rooms or []
|
||||
|
||||
with Measure(self.clock, "on_new_event"):
|
||||
user_streams: Set[_NotifierUserStream] = set()
|
||||
user_streams: Set[_NotifierUserStream] = set()
|
||||
|
||||
log_kv(
|
||||
{
|
||||
"waking_up_explicit_users": len(users),
|
||||
"waking_up_explicit_rooms": len(rooms),
|
||||
"users": shortstr(users),
|
||||
"rooms": shortstr(rooms),
|
||||
"stream": stream_key,
|
||||
"stream_id": new_token,
|
||||
}
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"waking_up_explicit_users": len(users),
|
||||
"waking_up_explicit_rooms": len(rooms),
|
||||
"users": shortstr(users),
|
||||
"rooms": shortstr(rooms),
|
||||
"stream": stream_key,
|
||||
"stream_id": new_token,
|
||||
}
|
||||
)
|
||||
|
||||
# Only calculate which user streams to wake up if there are, in fact,
|
||||
# any user streams registered.
|
||||
if self.user_to_user_stream or self.room_to_user_streams:
|
||||
for user in users:
|
||||
user_stream = self.user_to_user_stream.get(str(user))
|
||||
if user_stream is not None:
|
||||
@@ -565,25 +566,25 @@ class Notifier:
|
||||
# We resolve all these deferreds in one go so that we only need to
|
||||
# call `PreserveLoggingContext` once, as it has a bunch of overhead
|
||||
# (to calculate performance stats)
|
||||
with PreserveLoggingContext():
|
||||
for listener in listeners:
|
||||
listener.callback(current_token)
|
||||
if listeners:
|
||||
with PreserveLoggingContext():
|
||||
for listener in listeners:
|
||||
listener.callback(current_token)
|
||||
|
||||
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
|
||||
if user_streams:
|
||||
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
|
||||
|
||||
self.notify_replication()
|
||||
self.notify_replication()
|
||||
|
||||
# Notify appservices.
|
||||
try:
|
||||
self.appservice_handler.notify_interested_services_ephemeral(
|
||||
stream_key,
|
||||
new_token,
|
||||
users,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Error notifying application services of ephemeral events"
|
||||
)
|
||||
# Notify appservices.
|
||||
try:
|
||||
self.appservice_handler.notify_interested_services_ephemeral(
|
||||
stream_key,
|
||||
new_token,
|
||||
users,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Error notifying application services of ephemeral events")
|
||||
|
||||
def on_new_replication_data(self) -> None:
|
||||
"""Used to inform replication listeners that something has happened
|
||||
|
||||
@@ -205,6 +205,12 @@ class HttpPusher(Pusher):
|
||||
if self._is_processing:
|
||||
return
|
||||
|
||||
# Check if we are trying, but failing, to contact the pusher. If so, we
|
||||
# don't try and start processing immediately and instead wait for the
|
||||
# retry loop to try again later (which is controlled by the timer).
|
||||
if self.failing_since and self.timed_call and self.timed_call.active():
|
||||
return
|
||||
|
||||
run_as_background_process("httppush.process", self._process)
|
||||
|
||||
async def _process(self) -> None:
|
||||
|
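The pusher change above adds a guard so that a wake-up while the pusher is in its failure backoff does nothing: if `failing_since` is set and the retry timer is still pending, processing is left to the timer. A toy illustration of the same guard (the timer and counters are hypothetical stand-ins for Synapse's internals):

```python
import time
from dataclasses import dataclass
from typing import Optional


@dataclass
class FakeTimer:
    fires_at: float

    def active(self) -> bool:
        return time.time() < self.fires_at


@dataclass
class ToyPusher:
    failing_since: Optional[float] = None
    timed_call: Optional[FakeTimer] = None
    started: int = 0

    def on_new_notifications(self) -> None:
        # While failing and a retry is already scheduled, let the timer retry later.
        if self.failing_since and self.timed_call and self.timed_call.active():
            return
        self.started += 1  # stand-in for run_as_background_process(...)


p = ToyPusher(failing_since=time.time(), timed_call=FakeTimer(time.time() + 60))
p.on_new_notifications()
print(p.started)  # 0 - suppressed while backing off
p.failing_since = None
p.on_new_notifications()
print(p.started)  # 1
```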
||||
@@ -86,6 +86,7 @@ from synapse.rest.admin.rooms import (
|
||||
RoomStateRestServlet,
|
||||
RoomTimestampToEventRestServlet,
|
||||
)
|
||||
from synapse.rest.admin.scheduled_tasks import ScheduledTasksRestServlet
|
||||
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
|
||||
from synapse.rest.admin.statistics import (
|
||||
LargestRoomsStatistics,
|
||||
@@ -275,7 +276,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
"""
|
||||
Register all the admin servlets.
|
||||
"""
|
||||
# Admin servlets aren't registered on workers.
|
||||
RoomRestServlet(hs).register(http_server)
|
||||
|
||||
# Admin servlets below may not work on workers.
|
||||
if hs.config.worker.worker_app is not None:
|
||||
return
|
||||
|
||||
@@ -283,7 +286,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
BlockRoomRestServlet(hs).register(http_server)
|
||||
ListRoomRestServlet(hs).register(http_server)
|
||||
RoomStateRestServlet(hs).register(http_server)
|
||||
RoomRestServlet(hs).register(http_server)
|
||||
RoomRestV2Servlet(hs).register(http_server)
|
||||
RoomMembersRestServlet(hs).register(http_server)
|
||||
DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)
|
||||
@@ -337,6 +339,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
|
||||
ExperimentalFeaturesRestServlet(hs).register(http_server)
|
||||
SuspendAccountRestServlet(hs).register(http_server)
|
||||
ScheduledTasksRestServlet(hs).register(http_server)
|
||||
|
||||
|
||||
def register_servlets_for_client_rest_resource(
|
||||
|
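The registration change above moves `RoomRestServlet` ahead of the early return for workers, so that endpoint is served on every process while the remaining admin servlets stay main-process only. The shape of that pattern, with hypothetical servlet names:

```python
from typing import List


def register_servlets(is_worker: bool) -> List[str]:
    registered: List[str] = []

    # Safe to serve from any process.
    registered.append("RoomRestServlet")

    if is_worker:
        # Everything below may rely on main-process-only state.
        return registered

    registered.append("RoomRestV2Servlet")
    registered.append("ScheduledTasksRestServlet")
    return registered


print(register_servlets(is_worker=True))   # ['RoomRestServlet']
print(register_servlets(is_worker=False))  # all three
```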
||||
@@ -0,0 +1,70 @@
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright (C) 2025 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
#
|
||||
#
|
||||
from typing import TYPE_CHECKING, Tuple
|
||||
|
||||
from synapse.http.servlet import RestServlet, parse_integer, parse_string
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.rest.admin import admin_patterns, assert_requester_is_admin
|
||||
from synapse.types import JsonDict, TaskStatus
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
|
||||
class ScheduledTasksRestServlet(RestServlet):
|
||||
"""Get a list of scheduled tasks and their statuses
|
||||
optionally filtered by action name, resource id, status, and max timestamp
|
||||
"""
|
||||
|
||||
PATTERNS = admin_patterns("/scheduled_tasks$")
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self._auth = hs.get_auth()
|
||||
self._store = hs.get_datastores().main
|
||||
|
||||
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
await assert_requester_is_admin(self._auth, request)
|
||||
|
||||
# extract query params
|
||||
action_name = parse_string(request, "action_name")
|
||||
resource_id = parse_string(request, "resource_id")
|
||||
status = parse_string(request, "job_status")
|
||||
max_timestamp = parse_integer(request, "max_timestamp")
|
||||
|
||||
actions = [action_name] if action_name else None
|
||||
statuses = [TaskStatus(status)] if status else None
|
||||
|
||||
tasks = await self._store.get_scheduled_tasks(
|
||||
actions=actions,
|
||||
resource_id=resource_id,
|
||||
statuses=statuses,
|
||||
max_timestamp=max_timestamp,
|
||||
)
|
||||
|
||||
json_tasks = []
|
||||
for task in tasks:
|
||||
result_task = {
|
||||
"id": task.id,
|
||||
"action": task.action,
|
||||
"status": task.status,
|
||||
"timestamp_ms": task.timestamp,
|
||||
"resource_id": task.resource_id,
|
||||
"result": task.result,
|
||||
"error": task.error,
|
||||
}
|
||||
json_tasks.append(result_task)
|
||||
|
||||
return 200, {"scheduled_tasks": json_tasks}
|
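The new servlet above lets server admins list scheduled tasks, filtered by action, resource, status and timestamp. Assuming it is mounted under the usual `/_synapse/admin/v1` prefix (the exact path comes from `admin_patterns`, which is not shown in this diff), a query could look like the following; the host, access token and filter values are placeholders:

```python
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

BASE = "https://synapse.example.com/_synapse/admin/v1/scheduled_tasks"
params = urlencode({"job_status": "failed", "action_name": "some_task_action"})
req = Request(f"{BASE}?{params}", headers={"Authorization": "Bearer <admin access token>"})

with urlopen(req) as resp:
    body = json.load(resp)

for task in body["scheduled_tasks"]:
    print(task["id"], task["action"], task["status"], task["error"])
```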
||||
@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet):
|
||||
self.hs = hs
|
||||
self.auth = hs.get_auth()
|
||||
handler = hs.get_device_handler()
|
||||
assert isinstance(handler, DeviceHandler)
|
||||
self.device_handler = handler
|
||||
self.auth_handler = hs.get_auth_handler()
|
||||
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
|
||||
self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
|
||||
self._is_main_process = hs.config.worker.worker_app is None
|
||||
|
||||
async def on_GET(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
@@ -179,6 +179,14 @@ class DeviceRestServlet(RestServlet):
|
||||
async def on_DELETE(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
# Can only be run on main process, as changes to device lists must
|
||||
# happen on main.
|
||||
if not self._is_main_process:
|
||||
error_message = "DELETE on /devices/ must be routed to main process"
|
||||
logger.error(error_message)
|
||||
raise SynapseError(500, error_message)
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
requester = await self.auth.get_user_by_req(request)
|
||||
|
||||
try:
|
||||
@@ -223,6 +231,14 @@ class DeviceRestServlet(RestServlet):
|
||||
async def on_PUT(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
# Can only be run on main process, as changes to device lists must
|
||||
# happen on main.
|
||||
if not self._is_main_process:
|
||||
error_message = "PUT on /devices/ must be routed to main process"
|
||||
logger.error(error_message)
|
||||
raise SynapseError(500, error_message)
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
|
||||
body = parse_and_validate_json_object_from_request(request, self.PutBody)
|
||||
@@ -585,9 +601,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
):
|
||||
DeleteDevicesRestServlet(hs).register(http_server)
|
||||
DevicesRestServlet(hs).register(http_server)
|
||||
DeviceRestServlet(hs).register(http_server)
|
||||
|
||||
if hs.config.worker.worker_app is None:
|
||||
DeviceRestServlet(hs).register(http_server)
|
||||
if hs.config.experimental.msc2697_enabled:
|
||||
DehydratedDeviceServlet(hs).register(http_server)
|
||||
ClaimDehydratedDeviceServlet(hs).register(http_server)
|
||||
|
||||
@@ -528,12 +528,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
|
||||
remote_room_hosts,
|
||||
)
|
||||
|
||||
join_policy_token = parse_string(
|
||||
request, "re.jki.join_policy_token", required=False
|
||||
)
|
||||
|
||||
logger.info("re.jki.join_policy_token: %s", join_policy_token)
|
||||
|
||||
await self.room_member_handler.update_membership(
|
||||
requester=requester,
|
||||
target=requester.user,
|
||||
@@ -543,7 +537,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
|
||||
remote_room_hosts=remote_room_hosts,
|
||||
content=content,
|
||||
third_party_signed=content.get("third_party_signed", None),
|
||||
join_policy_token=join_policy_token,
|
||||
)
|
||||
|
||||
return 200, {"room_id": room_id}
|
||||
|
||||
@@ -24,7 +24,7 @@ from collections import defaultdict
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
|
||||
|
||||
from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
|
||||
from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
|
||||
from synapse.api.errors import Codes, StoreError, SynapseError
|
||||
from synapse.api.filtering import FilterCollection
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
@@ -248,9 +248,8 @@ class SyncRestServlet(RestServlet):
|
||||
await self._server_notices_sender.on_user_syncing(user.to_string())
|
||||
|
||||
# ignore the presence update if the ratelimit is exceeded but do not pause the request
|
||||
try:
|
||||
await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
|
||||
except LimitExceededError:
|
||||
allowed, _ = await self._presence_per_user_limiter.can_do_action(requester)
|
||||
if not allowed:
|
||||
affect_presence = False
|
||||
logger.debug("User set_presence ratelimit exceeded; ignoring it.")
|
||||
else:
|
||||
|
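The sync change above stops catching `LimitExceededError` from a pausing `ratelimit()` call and instead asks the limiter up front whether the presence update is allowed, dropping it without delaying the request when it is not. A toy limiter showing that check-without-blocking shape (not Synapse's `Ratelimiter`):

```python
import time
from typing import List


class TinyLimiter:
    def __init__(self, max_per_minute: int) -> None:
        self.max = max_per_minute
        self.events: List[float] = []

    def can_do_action(self) -> bool:
        now = time.time()
        self.events = [t for t in self.events if now - t < 60]
        if len(self.events) >= self.max:
            return False  # over the limit: the caller just skips the side effect
        self.events.append(now)
        return True


limiter = TinyLimiter(max_per_minute=2)
for i in range(4):
    affect_presence = limiter.can_do_action()
    print(i, "presence applied" if affect_presence else "presence ignored")
```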
||||
@@ -282,9 +282,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
|
||||
"count_devices_by_users", count_devices_by_users_txn, user_ids
|
||||
)
|
||||
|
||||
@cached()
|
||||
async def get_device(
|
||||
self, user_id: str, device_id: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
) -> Optional[Mapping[str, Any]]:
|
||||
"""Retrieve a device. Only returns devices that are not marked as
|
||||
hidden.
|
||||
|
||||
@@ -1817,6 +1818,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
},
|
||||
desc="store_device",
|
||||
)
|
||||
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
|
||||
|
||||
if not inserted:
|
||||
# if the device already exists, check if it's a real device, or
|
||||
# if the device ID is reserved by something else
|
||||
@@ -1882,6 +1885,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
values=device_ids,
|
||||
keyvalues={"user_id": user_id},
|
||||
)
|
||||
self._invalidate_cache_and_stream_bulk(
|
||||
txn, self.get_device, [(user_id, device_id) for device_id in device_ids]
|
||||
)
|
||||
|
||||
for batch in batch_iter(device_ids, 100):
|
||||
await self.db_pool.runInteraction(
|
||||
@@ -1915,6 +1921,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
updatevalues=updates,
|
||||
desc="update_device",
|
||||
)
|
||||
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
|
||||
|
||||
async def update_remote_device_list_cache_entry(
|
||||
self, user_id: str, device_id: str, content: JsonDict, stream_id: str
|
||||
|
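The device-store change above adds `@cached()` to `get_device` and narrows its return annotation from `Dict` to `Mapping`: cached results are shared between callers, so the type should forbid in-place mutation (the extra `invalidate_cache_and_stream` calls keep the cache honest when a device row changes). A small illustration of why the read-only type matters, using `functools.lru_cache` as a stand-in for Synapse's `@cached`:

```python
from functools import lru_cache
from types import MappingProxyType
from typing import Mapping


@lru_cache(maxsize=None)
def get_device(user_id: str, device_id: str) -> Mapping[str, str]:
    # MappingProxyType makes accidental mutation raise instead of silently
    # corrupting the cache entry shared by every other caller.
    return MappingProxyType({"user_id": user_id, "device_id": device_id})


d = get_device("@alice:example.com", "DEVICE1")
print(d["device_id"])
try:
    d["display_name"] = "oops"  # type: ignore[index]
except TypeError as exc:
    print("mutation rejected:", exc)
```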
||||
@@ -19,6 +19,7 @@
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -51,6 +52,8 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = (
|
||||
"media_repository_drop_index_wo_method_2"
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class LocalMedia:
|
||||
@@ -65,6 +68,7 @@ class LocalMedia:
|
||||
safe_from_quarantine: bool
|
||||
user_id: Optional[str]
|
||||
authenticated: Optional[bool]
|
||||
sha256: Optional[str]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -79,6 +83,7 @@ class RemoteMedia:
|
||||
last_access_ts: int
|
||||
quarantined_by: Optional[str]
|
||||
authenticated: Optional[bool]
|
||||
sha256: Optional[str]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -154,6 +159,26 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
|
||||
unique=True,
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="local_media_repository_sha256_idx",
|
||||
index_name="local_media_repository_sha256",
|
||||
table="local_media_repository",
|
||||
where_clause="sha256 IS NOT NULL",
|
||||
columns=[
|
||||
"sha256",
|
||||
],
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="remote_media_cache_sha256_idx",
|
||||
index_name="remote_media_cache_sha256",
|
||||
table="remote_media_cache",
|
||||
where_clause="sha256 IS NOT NULL",
|
||||
columns=[
|
||||
"sha256",
|
||||
],
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_update_handler(
|
||||
BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2,
|
||||
self._drop_media_index_without_method,
|
||||
@@ -221,6 +246,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"safe_from_quarantine",
|
||||
"user_id",
|
||||
"authenticated",
|
||||
"sha256",
|
||||
),
|
||||
allow_none=True,
|
||||
desc="get_local_media",
|
||||
@@ -239,6 +265,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
safe_from_quarantine=row[7],
|
||||
user_id=row[8],
|
||||
authenticated=row[9],
|
||||
sha256=row[10],
|
||||
)
|
||||
|
||||
async def get_local_media_by_user_paginate(
|
||||
@@ -295,7 +322,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
quarantined_by,
|
||||
safe_from_quarantine,
|
||||
user_id,
|
||||
authenticated
|
||||
authenticated,
|
||||
sha256
|
||||
FROM local_media_repository
|
||||
WHERE user_id = ?
|
||||
ORDER BY {order_by_column} {order}, media_id ASC
|
||||
@@ -320,6 +348,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
safe_from_quarantine=bool(row[8]),
|
||||
user_id=row[9],
|
||||
authenticated=row[10],
|
||||
sha256=row[11],
|
||||
)
|
||||
for row in txn
|
||||
]
|
||||
@@ -449,6 +478,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
media_length: int,
|
||||
user_id: UserID,
|
||||
url_cache: Optional[str] = None,
|
||||
sha256: Optional[str] = None,
|
||||
quarantined_by: Optional[str] = None,
|
||||
) -> None:
|
||||
if self.hs.config.media.enable_authenticated_media:
|
||||
authenticated = True
|
||||
@@ -466,6 +497,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"user_id": user_id.to_string(),
|
||||
"url_cache": url_cache,
|
||||
"authenticated": authenticated,
|
||||
"sha256": sha256,
|
||||
"quarantined_by": quarantined_by,
|
||||
},
|
||||
desc="store_local_media",
|
||||
)
|
||||
@@ -477,20 +510,28 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
upload_name: Optional[str],
|
||||
media_length: int,
|
||||
user_id: UserID,
|
||||
sha256: str,
|
||||
url_cache: Optional[str] = None,
|
||||
quarantined_by: Optional[str] = None,
|
||||
) -> None:
|
||||
updatevalues = {
|
||||
"media_type": media_type,
|
||||
"upload_name": upload_name,
|
||||
"media_length": media_length,
|
||||
"url_cache": url_cache,
|
||||
"sha256": sha256,
|
||||
}
|
||||
|
||||
# This should never be un-set by this function.
|
||||
if quarantined_by is not None:
|
||||
updatevalues["quarantined_by"] = quarantined_by
|
||||
|
||||
await self.db_pool.simple_update_one(
|
||||
"local_media_repository",
|
||||
keyvalues={
|
||||
"user_id": user_id.to_string(),
|
||||
"media_id": media_id,
|
||||
},
|
||||
updatevalues={
|
||||
"media_type": media_type,
|
||||
"upload_name": upload_name,
|
||||
"media_length": media_length,
|
||||
"url_cache": url_cache,
|
||||
},
|
||||
updatevalues=updatevalues,
|
||||
desc="update_local_media",
|
||||
)
|
||||
|
||||
@@ -657,6 +698,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"last_access_ts",
|
||||
"quarantined_by",
|
||||
"authenticated",
|
||||
"sha256",
|
||||
),
|
||||
allow_none=True,
|
||||
desc="get_cached_remote_media",
|
||||
@@ -674,6 +716,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
last_access_ts=row[5],
|
||||
quarantined_by=row[6],
|
||||
authenticated=row[7],
|
||||
sha256=row[8],
|
||||
)
|
||||
|
||||
async def store_cached_remote_media(
|
||||
@@ -685,6 +728,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
time_now_ms: int,
|
||||
upload_name: Optional[str],
|
||||
filesystem_id: str,
|
||||
sha256: Optional[str],
|
||||
) -> None:
|
||||
if self.hs.config.media.enable_authenticated_media:
|
||||
authenticated = True
|
||||
@@ -703,6 +747,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"filesystem_id": filesystem_id,
|
||||
"last_access_ts": time_now_ms,
|
||||
"authenticated": authenticated,
|
||||
"sha256": sha256,
|
||||
},
|
||||
desc="store_cached_remote_media",
|
||||
)
|
||||
@@ -946,3 +991,46 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
await self.db_pool.runInteraction(
|
||||
"delete_url_cache_media", _delete_url_cache_media_txn
|
||||
)
|
||||
|
||||
async def get_is_hash_quarantined(self, sha256: str) -> bool:
|
||||
"""Get whether a specific sha256 hash digest matches any quarantined media.
|
||||
|
||||
Returns:
|
||||
True if the hash matches any quarantined media (local or remote), otherwise False.
|
||||
"""
|
||||
|
||||
# If we don't have the index yet, performance tanks, so we return False.
|
||||
# In the background updates, remote_media_cache_sha256_idx is created
|
||||
# after local_media_repository_sha256_idx, which is why we only need to
|
||||
# check for the completion of the former.
|
||||
if not await self.db_pool.updates.has_completed_background_update(
|
||||
"remote_media_cache_sha256_idx"
|
||||
):
|
||||
return False
|
||||
|
||||
def get_matching_media_txn(
|
||||
txn: LoggingTransaction, table: str, sha256: str
|
||||
) -> bool:
|
||||
# Return on first match
|
||||
sql = """
|
||||
SELECT 1
|
||||
FROM local_media_repository
|
||||
WHERE sha256 = ? AND quarantined_by IS NOT NULL
|
||||
|
||||
UNION ALL
|
||||
|
||||
SELECT 1
|
||||
FROM remote_media_cache
|
||||
WHERE sha256 = ? AND quarantined_by IS NOT NULL
|
||||
LIMIT 1
|
||||
"""
|
||||
txn.execute(sql, (sha256, sha256))
|
||||
row = txn.fetchone()
|
||||
return row is not None
|
||||
|
||||
return await self.db_pool.runInteraction(
|
||||
"get_matching_media_txn",
|
||||
get_matching_media_txn,
|
||||
"local_media_repository",
|
||||
sha256,
|
||||
)
|
||||
|
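`get_is_hash_quarantined` above checks both media tables with one `UNION ALL ... LIMIT 1` query, so the database can stop at the first quarantined row matching the hash. The same query shape can be exercised against an in-memory SQLite database (the schema below is a deliberately simplified assumption, not Synapse's):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE local_media_repository (media_id TEXT, sha256 TEXT, quarantined_by TEXT);
    CREATE TABLE remote_media_cache (media_origin TEXT, media_id TEXT, sha256 TEXT, quarantined_by TEXT);
    INSERT INTO local_media_repository VALUES ('abc', 'deadbeef', NULL);
    INSERT INTO remote_media_cache VALUES ('other.example', 'xyz', 'deadbeef', '@admin:example.com');
    """
)


def is_hash_quarantined(sha256: str) -> bool:
    row = conn.execute(
        """
        SELECT 1 FROM local_media_repository
        WHERE sha256 = ? AND quarantined_by IS NOT NULL
        UNION ALL
        SELECT 1 FROM remote_media_cache
        WHERE sha256 = ? AND quarantined_by IS NOT NULL
        LIMIT 1
        """,
        (sha256, sha256),
    ).fetchone()
    return row is not None


print(is_hash_quarantined("deadbeef"))  # True - the remote copy is quarantined
print(is_hash_quarantined("cafebabe"))  # False
```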
||||
@@ -759,17 +759,37 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
|
||||
external_id: id on that system
|
||||
user_id: complete mxid that it is mapped to
|
||||
"""
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.get_user_by_external_id, (auth_provider, external_id)
|
||||
)
|
||||
|
||||
self.db_pool.simple_insert_txn(
|
||||
# This INSERT ... ON CONFLICT DO NOTHING statement will cause a
|
||||
# 'could not serialize access due to concurrent update'
|
||||
# if the row is added concurrently by another transaction.
|
||||
# This is exactly what we want, as it makes the transaction get retried
|
||||
# in a new snapshot where we can check for a genuine conflict.
|
||||
was_inserted = self.db_pool.simple_upsert_txn(
|
||||
txn,
|
||||
table="user_external_ids",
|
||||
values={
|
||||
"auth_provider": auth_provider,
|
||||
"external_id": external_id,
|
||||
"user_id": user_id,
|
||||
},
|
||||
keyvalues={"auth_provider": auth_provider, "external_id": external_id},
|
||||
values={},
|
||||
insertion_values={"user_id": user_id},
|
||||
)
|
||||
|
||||
if not was_inserted:
|
||||
existing_id = self.db_pool.simple_select_one_onecol_txn(
|
||||
txn,
|
||||
table="user_external_ids",
|
||||
keyvalues={"auth_provider": auth_provider, "user_id": user_id},
|
||||
retcol="external_id",
|
||||
allow_none=True,
|
||||
)
|
||||
|
||||
if existing_id != external_id:
|
||||
raise ExternalIDReuseException(
|
||||
f"{user_id!r} has external id {existing_id!r} for {auth_provider} but trying to add {external_id!r}"
|
||||
)
|
||||
|
||||
async def remove_user_external_id(
|
||||
self, auth_provider: str, external_id: str, user_id: str
|
||||
) -> None:
|
||||
@@ -789,6 +809,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
|
||||
},
|
||||
desc="remove_user_external_id",
|
||||
)
|
||||
await self.invalidate_cache_and_stream(
|
||||
"get_user_by_external_id", (auth_provider, external_id)
|
||||
)
|
||||
|
||||
async def replace_user_external_id(
|
||||
self,
|
||||
@@ -809,29 +832,20 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
|
||||
ExternalIDReuseException if the new external_id could not be mapped.
|
||||
"""
|
||||
|
||||
def _remove_user_external_ids_txn(
|
||||
def _replace_user_external_id_txn(
|
||||
txn: LoggingTransaction,
|
||||
user_id: str,
|
||||
) -> None:
|
||||
"""Remove all mappings from external user ids to a mxid
|
||||
If these mappings are not found, this method does nothing.
|
||||
|
||||
Args:
|
||||
user_id: complete mxid that it is mapped to
|
||||
"""
|
||||
|
||||
self.db_pool.simple_delete_txn(
|
||||
txn,
|
||||
table="user_external_ids",
|
||||
keyvalues={"user_id": user_id},
|
||||
)
|
||||
|
||||
def _replace_user_external_id_txn(
|
||||
txn: LoggingTransaction,
|
||||
) -> None:
|
||||
_remove_user_external_ids_txn(txn, user_id)
|
||||
|
||||
for auth_provider, external_id in record_external_ids:
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.get_user_by_external_id, (auth_provider, external_id)
|
||||
)
|
||||
|
||||
self._record_user_external_id_txn(
|
||||
txn,
|
||||
auth_provider,
|
||||
@@ -847,6 +861,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
|
||||
except self.database_engine.module.IntegrityError:
|
||||
raise ExternalIDReuseException()
|
||||
|
||||
@cached()
|
||||
async def get_user_by_external_id(
|
||||
self, auth_provider: str, external_id: str
|
||||
) -> Optional[str]:
|
||||
|
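The registration change above swaps a plain insert for an upsert so that recording an existing `(auth_provider, external_id)` mapping is a no-op, and only a mapping that points at a different user raises `ExternalIDReuseException`. A simplified sketch of that insert-if-absent-then-verify pattern with SQLite (the schema and the exact verification query differ from the diff):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE user_external_ids ("
    " auth_provider TEXT, external_id TEXT, user_id TEXT,"
    " PRIMARY KEY (auth_provider, external_id))"
)


def record_external_id(auth_provider: str, external_id: str, user_id: str) -> None:
    conn.execute(
        "INSERT INTO user_external_ids (auth_provider, external_id, user_id)"
        " VALUES (?, ?, ?) ON CONFLICT DO NOTHING",
        (auth_provider, external_id, user_id),
    )
    (existing,) = conn.execute(
        "SELECT user_id FROM user_external_ids"
        " WHERE auth_provider = ? AND external_id = ?",
        (auth_provider, external_id),
    ).fetchone()
    if existing != user_id:
        raise ValueError(f"external id already mapped to {existing!r}")


record_external_id("oidc", "alice", "@alice:example.com")
record_external_id("oidc", "alice", "@alice:example.com")  # idempotent
try:
    record_external_id("oidc", "alice", "@mallory:example.com")
except ValueError as exc:
    print(exc)
```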
||||
@@ -51,11 +51,15 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.events import EventBase
|
||||
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
|
||||
from synapse.storage._base import db_to_json, make_in_list_sql_clause
|
||||
from synapse.storage._base import (
|
||||
db_to_json,
|
||||
make_in_list_sql_clause,
|
||||
)
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
LoggingTransaction,
|
||||
make_tuple_in_list_sql_clause,
|
||||
)
|
||||
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
|
||||
from synapse.storage.types import Cursor
|
||||
@@ -1127,6 +1131,109 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
|
||||
|
||||
return local_media_ids
|
||||
|
||||
def _quarantine_local_media_txn(
|
||||
self,
|
||||
txn: LoggingTransaction,
|
||||
hashes: Set[str],
|
||||
media_ids: Set[str],
|
||||
quarantined_by: Optional[str],
|
||||
) -> int:
|
||||
"""Quarantine and unquarantine local media items.
|
||||
|
||||
Args:
|
||||
txn (cursor)
|
||||
hashes: A set of sha256 hashes for any media that should be quarantined
|
||||
media_ids: A set of media IDs for any media that should be quarantined
|
||||
quarantined_by: The ID of the user who initiated the quarantine request
|
||||
If it is `None` media will be removed from quarantine
|
||||
Returns:
|
||||
The total number of media items quarantined
|
||||
"""
|
||||
total_media_quarantined = 0
|
||||
|
||||
# Effectively a legacy path, update any media that was explicitly named.
|
||||
if media_ids:
|
||||
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
|
||||
txn.database_engine, "media_id", media_ids
|
||||
)
|
||||
sql = f"""
|
||||
UPDATE local_media_repository
|
||||
SET quarantined_by = ?
|
||||
WHERE {sql_many_clause_sql}"""
|
||||
|
||||
if quarantined_by is not None:
|
||||
sql += " AND safe_from_quarantine = FALSE"
|
||||
|
||||
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
|
||||
# Note that a rowcount of -1 can be used to indicate no rows were affected.
|
||||
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
|
||||
|
||||
# Update any media that was identified via hash.
|
||||
if hashes:
|
||||
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
|
||||
txn.database_engine, "sha256", hashes
|
||||
)
|
||||
sql = f"""
|
||||
UPDATE local_media_repository
|
||||
SET quarantined_by = ?
|
||||
WHERE {sql_many_clause_sql}"""
|
||||
|
||||
if quarantined_by is not None:
|
||||
sql += " AND safe_from_quarantine = FALSE"
|
||||
|
||||
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
|
||||
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
|
||||
|
||||
return total_media_quarantined
|
||||
|
||||
def _quarantine_remote_media_txn(
|
||||
self,
|
||||
txn: LoggingTransaction,
|
||||
hashes: Set[str],
|
||||
media: Set[Tuple[str, str]],
|
||||
quarantined_by: Optional[str],
|
||||
) -> int:
|
||||
"""Quarantine and unquarantine remote items
|
||||
|
||||
Args:
|
||||
txn (cursor)
|
||||
hashes: A set of sha256 hashes for any media that should be quarantined
|
||||
media: A set of tuples (media_origin, media_id) for any media that should be quarantined
|
||||
quarantined_by: The ID of the user who initiated the quarantine request
|
||||
If it is `None` media will be removed from quarantine
|
||||
Returns:
|
||||
The total number of media items quarantined
|
||||
"""
|
||||
total_media_quarantined = 0
|
||||
|
||||
if media:
|
||||
sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
|
||||
txn.database_engine,
|
||||
("media_origin", "media_id"),
|
||||
media,
|
||||
)
|
||||
sql = f"""
|
||||
UPDATE remote_media_cache
|
||||
SET quarantined_by = ?
|
||||
WHERE {sql_in_list_clause}"""
|
||||
|
||||
txn.execute(sql, [quarantined_by] + sql_args)
|
||||
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
|
||||
|
||||
|
||||
if hashes:
|
||||
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
|
||||
txn.database_engine, "sha256", hashes
|
||||
)
|
||||
sql = f"""
|
||||
UPDATE remote_media_cache
|
||||
SET quarantined_by = ?
|
||||
WHERE {sql_many_clause_sql}"""
|
||||
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
|
||||
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
|
||||
|
||||
return total_media_quarantined
|
||||
|
||||
def _quarantine_media_txn(
|
||||
self,
|
||||
txn: LoggingTransaction,
|
||||
@@ -1146,40 +1253,49 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
|
||||
Returns:
|
||||
The total number of media items quarantined
|
||||
"""
|
||||
hashes = set()
|
||||
media_ids = set()
|
||||
remote_media = set()
|
||||
|
||||
# Update all the tables to set the quarantined_by flag
|
||||
sql = """
|
||||
UPDATE local_media_repository
|
||||
SET quarantined_by = ?
|
||||
WHERE media_id = ?
|
||||
"""
|
||||
|
||||
# set quarantine
|
||||
if quarantined_by is not None:
|
||||
sql += "AND safe_from_quarantine = FALSE"
|
||||
txn.executemany(
|
||||
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
|
||||
# First, determine the hashes of the media we want to delete.
|
||||
# We also want the media_ids for any media that lacks a hash.
|
||||
if local_mxcs:
|
||||
hash_sql_many_clause_sql, hash_sql_many_clause_args = (
|
||||
make_in_list_sql_clause(txn.database_engine, "media_id", local_mxcs)
|
||||
)
|
||||
# remove from quarantine
|
||||
else:
|
||||
txn.executemany(
|
||||
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
|
||||
hash_sql = f"SELECT sha256, media_id FROM local_media_repository WHERE {hash_sql_many_clause_sql}"
|
||||
if quarantined_by is not None:
|
||||
hash_sql += " AND safe_from_quarantine = FALSE"
|
||||
|
||||
txn.execute(hash_sql, hash_sql_many_clause_args)
|
||||
for sha256, media_id in txn:
|
||||
if sha256:
|
||||
hashes.add(sha256)
|
||||
else:
|
||||
media_ids.add(media_id)
|
||||
|
||||
# Do the same for remote media
|
||||
if remote_mxcs:
|
||||
hash_sql_in_list_clause, hash_sql_args = make_tuple_in_list_sql_clause(
|
||||
txn.database_engine,
|
||||
("media_origin", "media_id"),
|
||||
remote_mxcs,
|
||||
)
|
||||
|
||||
# Note that a rowcount of -1 can be used to indicate no rows were affected.
|
||||
total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0
|
||||
hash_sql = f"SELECT sha256, media_origin, media_id FROM remote_media_cache WHERE {hash_sql_in_list_clause}"
|
||||
txn.execute(hash_sql, hash_sql_args)
|
||||
for sha256, media_origin, media_id in txn:
|
||||
if sha256:
|
||||
hashes.add(sha256)
|
||||
else:
|
||||
remote_media.add((media_origin, media_id))
|
||||
|
||||
txn.executemany(
|
||||
"""
|
||||
UPDATE remote_media_cache
|
||||
SET quarantined_by = ?
|
||||
WHERE media_origin = ? AND media_id = ?
|
||||
""",
|
||||
[(quarantined_by, origin, media_id) for origin, media_id in remote_mxcs],
|
||||
count = self._quarantine_local_media_txn(txn, hashes, media_ids, quarantined_by)
|
||||
count += self._quarantine_remote_media_txn(
|
||||
txn, hashes, remote_media, quarantined_by
|
||||
)
|
||||
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
|
||||
|
||||
return total_media_quarantined
|
||||
return count
|
||||
|
||||
async def block_room(self, room_id: str, user_id: str) -> None:
|
||||
"""Marks the room as blocked.
|
||||
|
||||
@@ -1622,14 +1622,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
|
||||
sql = """
|
||||
UPDATE room_memberships
|
||||
SET participant = true
|
||||
WHERE (user_id, room_id) IN (
|
||||
SELECT user_id, room_id
|
||||
FROM room_memberships
|
||||
WHERE user_id = ?
|
||||
AND room_id = ?
|
||||
ORDER BY event_stream_ordering DESC
|
||||
LIMIT 1
|
||||
WHERE event_id IN (
|
||||
SELECT event_id FROM local_current_membership
|
||||
WHERE user_id = ? AND room_id = ?
|
||||
)
|
||||
AND NOT participant
|
||||
"""
|
||||
txn.execute(sql, (user_id, room_id))
|
||||
|
||||
@@ -1651,11 +1648,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
|
||||
) -> bool:
|
||||
sql = """
|
||||
SELECT participant
|
||||
FROM room_memberships
|
||||
WHERE user_id = ?
|
||||
AND room_id = ?
|
||||
ORDER BY event_stream_ordering DESC
|
||||
LIMIT 1
|
||||
FROM local_current_membership AS l
|
||||
INNER JOIN room_memberships AS r USING (event_id)
|
||||
WHERE l.user_id = ?
|
||||
AND l.room_id = ?
|
||||
"""
|
||||
txn.execute(sql, (user_id, room_id))
|
||||
res = txn.fetchone()
|
||||
|
||||
@@ -48,6 +48,7 @@ from synapse.storage.database import (
|
||||
LoggingTransaction,
|
||||
)
|
||||
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.storage.types import Cursor
|
||||
from synapse.storage.util.sequence import build_sequence_generator
|
||||
from synapse.types import MutableStateMap, StateKey, StateMap
|
||||
@@ -914,6 +915,12 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
|
||||
) -> None:
|
||||
# Delete all edges that reference a state group linked to room_id
|
||||
logger.info("[purge] removing %s from state_group_edges", room_id)
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
# Disable statement timeouts for this transaction; purging rooms can
|
||||
# take a while!
|
||||
txn.execute("SET LOCAL statement_timeout = 0")
|
||||
|
||||
txn.execute(
|
||||
"""
|
||||
DELETE FROM state_group_edges AS sge WHERE sge.state_group IN (
|
||||
|
||||
@@ -19,7 +19,7 @@
#
#

SCHEMA_VERSION = 90 # remember to update the list below when updating
SCHEMA_VERSION = 91 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema

This should be incremented whenever the codebase changes its requirements on the
@@ -0,0 +1,28 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

-- Store the SHA256 content hash of media files.
ALTER TABLE local_media_repository ADD COLUMN sha256 TEXT;
ALTER TABLE remote_media_cache ADD COLUMN sha256 TEXT;

-- Add background updates to handle creating the new indexes.
--
-- Note that the ordering of the update is not following the usual scheme. This
-- is because when upgrading from Synapse 1.127, this index is fairly important
-- to have up quickly, so that it doesn't tank performance, which is why it is
-- scheduled before other background updates in the 1.127 -> 1.128 upgrade.
INSERT INTO
    background_updates (ordering, update_name, progress_json)
VALUES
    (8890, 'local_media_repository_sha256_idx', '{}'),
    (8891, 'remote_media_cache_sha256_idx', '{}');
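The delta above adds nullable `sha256` columns and schedules background jobs that build partial indexes over them (`WHERE sha256 IS NOT NULL`), so rows without a hash never enter the index. The effect of such a partial index can be tried directly in SQLite as a rough stand-in for what the background update builds:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE local_media_repository (media_id TEXT, sha256 TEXT);
    CREATE INDEX local_media_repository_sha256
        ON local_media_repository (sha256)
        WHERE sha256 IS NOT NULL;
    INSERT INTO local_media_repository VALUES ('a', 'deadbeef'), ('b', NULL);
    """
)
plan = conn.execute(
    "EXPLAIN QUERY PLAN "
    "SELECT media_id FROM local_media_repository WHERE sha256 = 'deadbeef'"
).fetchall()
print(plan)  # the plan should reference the partial index
```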
@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

-- Remove the old unreferenced state group deletion background update if it exists
DELETE FROM background_updates WHERE update_name = 'delete_unreferenced_state_groups_bg_update';