Compare commits: v1.126.0rc ... devon/medi (98 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 82ab47ab79 | |
| | 6b9fa8f516 | |
| | d59bbd8b6b | |
| | 7be6c711d4 | |
| | 5ab05e7b95 | |
| | 7563b2a2a3 | |
| | 4097ada89f | |
| | f79811ed80 | |
| | 4eaab31757 | |
| | ad140130cc | |
| | e47de2b32d | |
| | 0384fd72ee | |
| | 75832f25b0 | |
| | 7346760aed | |
| | b0795d0cb6 | |
| | 2ef7824620 | |
| | 39e17856a3 | |
| | 4c958c679a | |
| | a87981f673 | |
| | 2ff977a6c3 | |
| | 1482ad1917 | |
| | 5b89c92643 | |
| | 33824495ba | |
| | 89cb613a4e | |
| | c16a981f22 | |
| | 0046d7278b | |
| | 2c7a61e311 | |
| | 45420b1d42 | |
| | 19b0e23c3d | |
| | a832375bfb | |
| | ae701e1709 | |
| | dd05cc55ee | |
| | 081f6ad50f | |
| | b30fcb03cc | |
| | 0e3c0aeee8 | |
| | 5c84f25809 | |
| | 770768614b | |
| | b8b3896b1d | |
| | 01efc49554 | |
| | fa53a8512a | |
| | fdbcb821ff | |
| | 8eb991b746 | |
| | 87d374c639 | |
| | 1709234311 | |
| | 80b62d7903 | |
| | 7ace290f07 | |
| | 2f812c2eb6 | |
| | 90f346183a | |
| | f638a76ba4 | |
| | cf02b8fea5 | |
| | 1deb6e03e0 | |
| | 02eed668b8 | |
| | 9f8ed14535 | |
| | 3bc04d05a4 | |
| | 4dba011c31 | |
| | 76ffd3ba01 | |
| | 3c188231c7 | |
| | d17295e5c3 | |
| | a39b856cf0 | |
| | 2830013e5e | |
| | ecc09b15f1 | |
| | 31110f35d9 | |
| | 2277df2a1e | |
| | 5e83434f3a | |
| | a227d20c25 | |
| | bd08a01fc8 | |
| | 92a29dcffc | |
| | 2719bd1794 | |
| | 7af299b365 | |
| | d8fef721a0 | |
| | 1efb826b54 | |
| | 33bcef9dc7 | |
| | 51deadec41 | |
| | 47e295bf3a | |
| | 4b8dbe22c0 | |
| | bfafd0f2c7 | |
| | d61bdff7a4 | |
| | 4d2c4ce92b | |
| | 79081e1be5 | |
| | 51df675c05 | |
| | 59a15da433 | |
| | a278c0d852 | |
| | 929f19b472 | |
| | 60b3cd0650 | |
| | df044a3667 | |
| | 04814a48de | |
| | 698278ba50 | |
| | 74cc353961 | |
| | caa2012154 | |
| | 5064f35958 | |
| | c30157b3cb | |
| | fda1ffe5b8 | |
| | a4c476305e | |
| | 1803a62db4 | |
| | 8295de87a7 | |
| | 350e84a8a4 | |
| | 69aceef8f6 | |
| | b7946c29be | |

.ci/before_build_wheel.sh (new file, 10 lines)

@@ -0,0 +1,10 @@
#!/bin/sh
set -xeu

# On 32-bit Linux platforms, we need libatomic1 to use rustup
if command -v yum &> /dev/null; then
    yum install -y libatomic
fi

# Install a Rust toolchain
curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal

@@ -11,12 +11,12 @@ with open("poetry.lock", "rb") as f:

try:
    lock_version = lockfile["metadata"]["lock-version"]
    assert lock_version == "2.0"
    assert lock_version == "2.1"
except Exception:
    print(
        """\
Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
Lockfile is not version 2.1. You probably need to upgrade poetry on your local box
and re-run `poetry lock`. See the Poetry cheat sheet at
https://element-hq.github.io/synapse/develop/development/dependencies.html
"""
    )

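The hunk above updates the repository's Poetry lockfile check (the `check-lockfile` job later in this diff runs `.ci/scripts/check_lockfile.py`) to expect lock-version 2.1 instead of 2.0. Below is a minimal standalone sketch of the same kind of check, assuming Python 3.11+ for the standard-library `tomllib` module; the actual script's surrounding structure and error handling may differ.

```python
# Minimal sketch of a poetry.lock "lock-version" check, mirroring the hunk
# above. Assumes Python 3.11+ (tomllib); the real script may differ.
import sys
import tomllib

with open("poetry.lock", "rb") as f:
    lockfile = tomllib.load(f)

lock_version = lockfile["metadata"]["lock-version"]
if lock_version != "2.1":
    print(
        "Lockfile is not version 2.1. You probably need to upgrade poetry on "
        "your local box and re-run `poetry lock`."
    )
    sys.exit(1)
```
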
.github/workflows/docker.yml (vendored, 16 changed lines)

@@ -18,22 +18,22 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
with:
platforms: arm64

- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

- name: Inspect builder
run: docker buildx inspect

- name: Install Cosign
uses: sigstore/cosign-installer@v3.8.1
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2

- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -43,13 +43,13 @@ jobs:
echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV

- name: Log in to DockerHub
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Log in to GHCR
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -57,7 +57,7 @@ jobs:

- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: |
docker.io/matrixdotorg/synapse
@@ -72,7 +72,7 @@ jobs:

- name: Build and push all platforms
id: build-and-push
uses: docker/build-push-action@v6
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
push: true
labels: |

.github/workflows/docs-pr-netlify.yaml (vendored, 4 changed lines)

@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@ jobs:
path: book

- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@v3
uses: matrix-org/netlify-pr-preview@9805cd123fc9a7e421e35340a05e1ebc5dee46b5 # v3
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}

.github/workflows/docs-pr.yaml (vendored, 8 changed lines)

@@ -13,7 +13,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -24,7 +24,7 @@ jobs:
mdbook-version: '0.4.17'

- name: Setup python
uses: actions/setup-python@v5
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"

@@ -39,7 +39,7 @@ jobs:
cp book/welcome_and_overview.html book/index.html

- name: Upload Artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: book
path: book
@@ -50,7 +50,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Setup mdbook
uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0

.github/workflows/docs.yaml (vendored, 4 changed lines)

@@ -50,7 +50,7 @@ jobs:
needs:
- pre
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -64,7 +64,7 @@ jobs:
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js

- name: Setup python
uses: actions/setup-python@v5
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"

.github/workflows/fix_lint.yaml (vendored, 11 changed lines)

@@ -13,21 +13,22 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@master
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
# We use nightly so that `fmt` correctly groups together imports, and
# clippy correctly fixes up the benchmarks.
toolchain: nightly-2022-12-01
components: clippy, rustfmt
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
install-project: "false"
poetry-version: "2.1.1"

- name: Run ruff check
continue-on-error: true
@@ -43,6 +44,6 @@ jobs:
- run: cargo fmt
continue-on-error: true

- uses: stefanzweifel/git-auto-commit-action@v5
- uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
with:
commit_message: "Attempt to fix linting"

.github/workflows/latest_deps.yml (vendored, 34 changed lines)

@@ -39,17 +39,17 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -72,11 +72,11 @@ jobs:
postgres-version: "14"

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
@@ -86,7 +86,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v5
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -145,11 +145,11 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
@@ -164,7 +164,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -192,15 +192,15 @@ jobs:
database: Postgres

steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
- name: Check out synapse codebase
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse

- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

- uses: actions/setup-go@v5
- uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -225,7 +225,7 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/poetry_lockfile.yaml (vendored, 4 changed lines)

@@ -16,8 +16,8 @@ jobs:
name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.x'
- run: pip install tomli

.github/workflows/push_complement_image.yml (vendored, 10 changed lines)

@@ -33,29 +33,29 @@ jobs:
packages: write
steps:
- name: Checkout specific branch (debug build)
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'push'
with:
ref: master
- name: Login to registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |

.github/workflows/release-artifacts.yml (vendored, 34 changed lines)

@@ -27,8 +27,8 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.x'
- id: set-distros
@@ -55,18 +55,18 @@ jobs:

steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src

- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
with:
install: true

- name: Set up docker layer caching
uses: actions/cache@v4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -74,7 +74,7 @@ jobs:
${{ runner.os }}-buildx-

- name: Set up python
uses: actions/setup-python@v5
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.x'

@@ -101,7 +101,7 @@ jobs:
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"

- name: Upload debs as artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*
@@ -130,20 +130,20 @@ jobs:
arch: aarch64

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- uses: actions/setup-python@v5
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
python-version: "3.x"

- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.19.1
run: python -m pip install cibuildwheel==2.23.0

- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
with:
platforms: arm64

@@ -165,7 +165,7 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI

- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl
@@ -176,8 +176,8 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/pull/') }}

steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.10'

@@ -186,7 +186,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist

- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: Sdist
path: dist/*.tar.gz
@@ -203,7 +203,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
@@ -213,7 +213,7 @@ jobs:
tar -cvJf debs.tar.xz debs
- name: Attach to release
# Pinned to work around https://github.com/softprops/action-gh-release/issues/445
uses: softprops/action-gh-release@v0.1.15
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

.github/workflows/tests.yml (vendored, 150 changed lines)

@@ -23,7 +23,7 @@ jobs:
linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
steps:
- uses: dorny/paths-filter@v3
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
@@ -83,14 +83,14 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
@@ -101,8 +101,8 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
@@ -111,8 +111,8 @@ jobs:
check-lockfile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
@@ -124,11 +124,12 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
poetry-version: "2.1.1"
install-project: "false"

- name: Run ruff check
@@ -145,14 +146,14 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
# We want to make use of type hints in optional dependencies too.
extras: all
@@ -161,11 +162,12 @@ jobs:
# https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
# To make CI green, err towards caution and install the project.
install-project: "true"
poetry-version: "2.1.1"

# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
uses: actions/cache@v4
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: |
.mypy_cache
@@ -178,7 +180,7 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh

@@ -186,11 +188,11 @@ jobs:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v5
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
@@ -204,15 +206,15 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py

@@ -222,13 +224,13 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
with:
components: clippy
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: cargo clippy -- -D warnings

@@ -240,14 +242,14 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@master
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
toolchain: nightly-2022-12-01
components: clippy
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: cargo clippy --all-features -- -D warnings

@@ -257,15 +259,15 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@master
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
# We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01
components: rustfmt
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: cargo fmt --check

@@ -276,8 +278,8 @@ jobs:
needs: changes
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- run: "pip install rstcheck"
@@ -301,7 +303,7 @@ jobs:
- lint-readme
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v3
- uses: matrix-org/done-action@3409aa904e8a2aaf2220f09bc954d3d0b0a2ee67 # v3
with:
needs: ${{ toJSON(needs) }}

@@ -324,8 +326,8 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- id: get-matrix
@@ -345,7 +347,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -360,13 +362,13 @@ jobs:
postgres:${{ matrix.job.postgres-version }}

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.job.python-version }}
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
@@ -399,11 +401,11 @@ jobs:
- changes
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
@@ -412,7 +414,7 @@ jobs:
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

- uses: actions/setup-python@v5
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: '3.9'

@@ -462,13 +464,13 @@ jobs:
extras: ["all"]

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
@@ -512,13 +514,13 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Run SyTest
run: /bootstrap.sh synapse
@@ -527,7 +529,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -557,11 +559,11 @@ jobs:
--health-retries 5

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
@@ -601,7 +603,7 @@ jobs:
--health-retries 5

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
@@ -612,10 +614,10 @@ jobs:
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
poetry-version: "2.1.1"
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
@@ -625,7 +627,7 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
@@ -655,19 +657,19 @@ jobs:
database: Postgres

steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
- name: Checkout synapse codebase
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

- uses: actions/setup-go@v5
- uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -690,11 +692,11 @@ jobs:
- changes

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@1.66.0
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: cargo test

@@ -708,13 +710,13 @@ jobs:
- changes

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@master
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- run: cargo bench --no-run

@@ -733,7 +735,7 @@ jobs:
- linting-done
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v3
- uses: matrix-org/done-action@3409aa904e8a2aaf2220f09bc954d3d0b0a2ee67 # v3
with:
needs: ${{ toJSON(needs) }}

.github/workflows/triage-incoming.yml (vendored, 2 changed lines)

@@ -6,7 +6,7 @@ on:

jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@18beaf3c8e536108bd04d18e6c3dc40ba3931e28 # v2.0.3
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}

.github/workflows/triage_labelled.yml (vendored, 2 changed lines)

@@ -11,7 +11,7 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: actions/add-to-project@main
- uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
id: add_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"

.github/workflows/twisted_trunk.yml (vendored, 36 changed lines)

@@ -40,16 +40,17 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
extras: "all"
poetry-version: "2.1.1"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
@@ -64,17 +65,18 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1

- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- uses: matrix-org/setup-python-poetry@v1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
extras: "all test"
poetry-version: "2.1.1"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
@@ -108,11 +110,11 @@ jobs:
- ${{ github.workspace }}:/src

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
@@ -136,7 +138,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -164,14 +166,14 @@ jobs:

steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse

- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

- uses: actions/setup-go@v5
- uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -181,11 +183,11 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.3.2
pipx install poetry==2.1.1

poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
poetry lock
working-directory: synapse

- run: |
@@ -206,7 +208,7 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

CHANGES.md (126 changed lines)

@@ -1,9 +1,133 @@
# Synapse 1.126.0rc1 (2025-03-04)
# Synapse 1.128.0 (2025-04-08)

No significant changes since 1.128.0rc1.


# Synapse 1.128.0rc1 (2025-04-01)

### Features

- Add an access token introspection cache to make Matrix Authentication Service integration ([MSC3861](https://github.com/matrix-org/matrix-doc/pull/3861)) more efficient. ([\#18231](https://github.com/element-hq/synapse/issues/18231))
- Add background job to clear unreferenced state groups. ([\#18254](https://github.com/element-hq/synapse/issues/18254))
- Hashes of media files are now tracked by Synapse. Media quarantines will now apply to all files with the same hash. ([\#18277](https://github.com/element-hq/synapse/issues/18277), [\#18302](https://github.com/element-hq/synapse/issues/18302), [\#18296](https://github.com/element-hq/synapse/issues/18296))

### Bugfixes

- Add index to sliding sync ([MSC4186](https://github.com/matrix-org/matrix-doc/pull/4186)) membership snapshot table, to fix a performance issue. ([\#18074](https://github.com/element-hq/synapse/issues/18074))

### Updates to the Docker image

- Specify the architecture of installed packages via an APT config option, which is more reliable than appending package names with `:{arch}`. ([\#18271](https://github.com/element-hq/synapse/issues/18271))
- Always specify base image debian versions with a build argument. ([\#18272](https://github.com/element-hq/synapse/issues/18272))
- Allow passing arguments to `start_for_complement.sh` (to be sent to `configure_workers_and_start.py`). ([\#18273](https://github.com/element-hq/synapse/issues/18273))
- Make some improvements to the `prefix-log` script in the workers image. ([\#18274](https://github.com/element-hq/synapse/issues/18274))
- Use `uv pip` to install `supervisor` in the worker image. ([\#18275](https://github.com/element-hq/synapse/issues/18275))
- Avoid needing to download & use `rsync` in a build layer. ([\#18287](https://github.com/element-hq/synapse/issues/18287))

### Improved Documentation

- Fix how to obtain access token and change naming from riot to element ([\#18225](https://github.com/element-hq/synapse/issues/18225))
- Correct a small typo in the SSO mapping providers documentation. ([\#18276](https://github.com/element-hq/synapse/issues/18276))
- Add docs for how to clear out the Poetry wheel cache. ([\#18283](https://github.com/element-hq/synapse/issues/18283))

### Internal Changes

- Add a column `participant` to `room_memberships` table. ([\#18068](https://github.com/element-hq/synapse/issues/18068))
- Update Poetry to 2.1.1, including updating the lock file version. ([\#18251](https://github.com/element-hq/synapse/issues/18251))
- Pin GitHub Actions dependencies by commit hash. ([\#18255](https://github.com/element-hq/synapse/issues/18255))
- Add DB delta to remove the old state group deletion job. ([\#18284](https://github.com/element-hq/synapse/issues/18284))

### Updates to locked dependencies

* Bump actions/add-to-project from f5473ace9aeee8b97717b281e26980aa5097023f to 280af8ae1f83a494cfad2cb10f02f6d13529caa9. ([\#18303](https://github.com/element-hq/synapse/issues/18303))
* Bump actions/cache from 4.2.2 to 4.2.3. ([\#18266](https://github.com/element-hq/synapse/issues/18266))
* Bump actions/download-artifact from 4.2.0 to 4.2.1. ([\#18268](https://github.com/element-hq/synapse/issues/18268))
* Bump actions/setup-python from 5.4.0 to 5.5.0. ([\#18298](https://github.com/element-hq/synapse/issues/18298))
* Bump actions/upload-artifact from 4.6.1 to 4.6.2. ([\#18304](https://github.com/element-hq/synapse/issues/18304))
* Bump authlib from 1.4.1 to 1.5.1. ([\#18306](https://github.com/element-hq/synapse/issues/18306))
* Bump dawidd6/action-download-artifact from 8 to 9. ([\#18204](https://github.com/element-hq/synapse/issues/18204))
* Bump jinja2 from 3.1.5 to 3.1.6. ([\#18223](https://github.com/element-hq/synapse/issues/18223))
* Bump log from 0.4.26 to 0.4.27. ([\#18267](https://github.com/element-hq/synapse/issues/18267))
* Bump phonenumbers from 8.13.50 to 9.0.2. ([\#18299](https://github.com/element-hq/synapse/issues/18299))
* Bump pygithub from 2.5.0 to 2.6.1. ([\#18243](https://github.com/element-hq/synapse/issues/18243))
* Bump pyo3-log from 0.12.1 to 0.12.2. ([\#18269](https://github.com/element-hq/synapse/issues/18269))


# Synapse 1.127.1 (2025-03-26)

## Security
- Fix [CVE-2025-30355](https://www.cve.org/CVERecord?id=CVE-2025-30355) / [GHSA-v56r-hwv5-mxg6](https://github.com/element-hq/synapse/security/advisories/GHSA-v56r-hwv5-mxg6). **High severity vulnerability affecting federation. The vulnerability has been exploited in the wild.**


# Synapse 1.127.0 (2025-03-25)

No significant changes since 1.127.0rc1.


# Synapse 1.127.0rc1 (2025-03-18)

### Features

- Update [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140) implementation to no longer cancel a user's own delayed state events with an event type & state key that match a more recent state event sent by that user. ([\#17810](https://github.com/element-hq/synapse/issues/17810))

### Improved Documentation

- Fixed a minor typo in the Synapse documentation. Contributed by @karuto12. ([\#18224](https://github.com/element-hq/synapse/issues/18224))

### Internal Changes

- Remove undocumented `SYNAPSE_USE_FROZEN_DICTS` environment variable. ([\#18123](https://github.com/element-hq/synapse/issues/18123))
- Fix detection of workflow failures in the release script. ([\#18211](https://github.com/element-hq/synapse/issues/18211))
- Add caching support to media endpoints. ([\#18235](https://github.com/element-hq/synapse/issues/18235))

### Updates to locked dependencies

* Bump anyhow from 1.0.96 to 1.0.97. ([\#18201](https://github.com/element-hq/synapse/issues/18201))
* Bump bcrypt from 4.2.1 to 4.3.0. ([\#18207](https://github.com/element-hq/synapse/issues/18207))
* Bump bytes from 1.10.0 to 1.10.1. ([\#18227](https://github.com/element-hq/synapse/issues/18227))
* Bump http from 1.2.0 to 1.3.1. ([\#18245](https://github.com/element-hq/synapse/issues/18245))
* Bump sentry-sdk from 2.19.2 to 2.22.0. ([\#18205](https://github.com/element-hq/synapse/issues/18205))
* Bump serde from 1.0.218 to 1.0.219. ([\#18228](https://github.com/element-hq/synapse/issues/18228))
* Bump serde_json from 1.0.139 to 1.0.140. ([\#18202](https://github.com/element-hq/synapse/issues/18202))
* Bump ulid from 1.2.0 to 1.2.1. ([\#18246](https://github.com/element-hq/synapse/issues/18246))


# Synapse 1.126.0 (2025-03-11)
Administrators using the Debian/Ubuntu packages from `packages.matrix.org`, please check
[the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/release-v1.126/docs/upgrade.md#change-of-signing-key-expiry-date-for-the-debianubuntu-package-repository)
as we have recently updated the expiry date on the repository's GPG signing key. The old version of the key will expire on `2025-03-15`.

No significant changes since 1.126.0rc3.


# Synapse 1.126.0rc3 (2025-03-07)

### Bugfixes

- Revert the background job to clear unreferenced state groups (that was introduced in v1.126.0rc1), due to [a suspected issue](https://github.com/element-hq/synapse/issues/18217) that causes increased disk usage. ([\#18222](https://github.com/element-hq/synapse/issues/18222))


# Synapse 1.126.0rc2 (2025-03-05)

### Internal Changes

- Fix wheel building configuration in CI by installing libatomic1. ([\#18212](https://github.com/element-hq/synapse/issues/18212), [\#18213](https://github.com/element-hq/synapse/issues/18213))

# Synapse 1.126.0rc1 (2025-03-04)

Synapse 1.126.0rc1 was not fully released due to an error in CI.

### Features

Cargo.lock (generated, 56 changed lines)

@@ -13,9 +13,9 @@ dependencies = [

[[package]]
name = "anyhow"
version = "1.0.96"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4"
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"

[[package]]
name = "arc-swap"
@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"

[[package]]
name = "bytes"
version = "1.10.0"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"

[[package]]
name = "cfg-if"
@@ -173,9 +173,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "http"
version = "1.2.0"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
dependencies = [
"bytes",
"fnv",
@@ -223,9 +223,9 @@ checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"

[[package]]
name = "log"
version = "0.4.26"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"

[[package]]
name = "memchr"
@@ -277,9 +277,9 @@ dependencies = [

[[package]]
name = "pyo3"
version = "0.23.4"
version = "0.23.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc"
checksum = "7778bffd85cf38175ac1f545509665d0b9b92a198ca7941f131f85f7a4f9a872"
dependencies = [
"anyhow",
"cfg-if",
@@ -296,9 +296,9 @@ dependencies = [

[[package]]
name = "pyo3-build-config"
version = "0.23.4"
version = "0.23.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7"
checksum = "94f6cbe86ef3bf18998d9df6e0f3fc1050a8c5efa409bf712e661a4366e010fb"
dependencies = [
"once_cell",
"target-lexicon",
@@ -306,9 +306,9 @@ dependencies = [

[[package]]
name = "pyo3-ffi"
version = "0.23.4"
version = "0.23.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d"
checksum = "e9f1b4c431c0bb1c8fb0a338709859eed0d030ff6daa34368d3b152a63dfdd8d"
dependencies = [
"libc",
"pyo3-build-config",
@@ -316,9 +316,9 @@ dependencies = [

[[package]]
name = "pyo3-log"
version = "0.12.1"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be5bb22b77965a7b5394e9aae9897a0607b51df5167561ffc3b02643b4200bc7"
checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
dependencies = [
"arc-swap",
"log",
@@ -327,9 +327,9 @@ dependencies = [

[[package]]
name = "pyo3-macros"
version = "0.23.4"
version = "0.23.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7"
checksum = "fbc2201328f63c4710f68abdf653c89d8dbc2858b88c5d88b0ff38a75288a9da"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -339,9 +339,9 @@ dependencies = [

[[package]]
name = "pyo3-macros-backend"
version = "0.23.4"
version = "0.23.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4"
checksum = "fca6726ad0f3da9c9de093d6f116a93c1a38e417ed73bf138472cf4064f72028"
dependencies = [
"heck",
"proc-macro2",
@@ -437,18 +437,18 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"

[[package]]
name = "serde"
version = "1.0.218"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.218"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
@@ -457,9 +457,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.139"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
@@ -544,9 +544,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"

[[package]]
name = "ulid"
version = "1.2.0"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab82fc73182c29b02e2926a6df32f2241dbadb5cfc111fd595515b3598f46bb3"
checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe"
dependencies = [
"rand",
"web-time",

1 changelog.d/18133.misc Normal file
@@ -0,0 +1 @@
Disable statement timeout during room purge.

1 changelog.d/18232.feature Normal file
@@ -0,0 +1 @@
Add `passthrough_authorization_parameters` in OIDC configuration to allow to pass parameters to the authorization grant URL.

1 changelog.d/18237.doc Normal file
@@ -0,0 +1 @@
Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.

1 changelog.d/18291.docker Normal file
@@ -0,0 +1 @@
In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.

1 changelog.d/18292.docker Normal file
@@ -0,0 +1 @@
Optimize the build of the workers image.

1 changelog.d/18293.docker Normal file
@@ -0,0 +1 @@
In start_for_complement.sh, replace some external program calls with shell builtins.

1 changelog.d/18294.docker Normal file
@@ -0,0 +1 @@
Optimize the build of the complement-synapse image.

1 changelog.d/18295.docker Normal file
@@ -0,0 +1 @@
When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.

1 changelog.d/18320.doc Normal file
@@ -0,0 +1 @@
Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.

1 changelog.d/18334.bugfix Normal file
@@ -0,0 +1 @@
Fix `force_tracing_for_users` config when using delegated auth.

1 changelog.d/18335.bugfix Normal file
@@ -0,0 +1 @@
Fix the token introspection cache logging access tokens when MAS integration is in use.

1 changelog.d/18337.misc Normal file
@@ -0,0 +1 @@
Add cache to storage functions used to auth requests when using delegated auth.

1 changelog.d/18339.bugfix Normal file
@@ -0,0 +1 @@
Stop caching introspection failures when delegating auth to MAS.

1 changelog.d/18342.bugfix Normal file
@@ -0,0 +1 @@
Fix `ExternalIDReuse` exception after migrating to MAS on workers with a high traffic.

1 changelog.d/18345.bugfix Normal file
@@ -0,0 +1 @@
Fix minor performance regression caused by tracking of room participation. Regressed in v1.128.0.

1 changelog.d/18355.feature Normal file
@@ -0,0 +1 @@
Add support for handling `GET /devices/` on workers.

1 changelog.d/18360.misc Normal file
@@ -0,0 +1 @@
Allow `/rooms/` admin API to be run on workers.

1 changelog.d/18363.bugfix Normal file
@@ -0,0 +1 @@
Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.

1 changelog.d/18367.misc Normal file
@@ -0,0 +1 @@
Minor performance improvements to the notifier.

1 changelog.d/18369.misc Normal file
@@ -0,0 +1 @@
Slight performance increase when using the ratelimiter.

1 changelog.d/18378.bugfix Normal file
@@ -0,0 +1 @@
Allow client & media admin apis to coexist.
2 debian/build_virtualenv vendored
@@ -35,7 +35,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.3.2
pip install poetry==2.1.1 poetry-plugin-export==1.9.0
poetry export \
    --extras all \
    --extras test \
49 debian/changelog vendored
@@ -1,3 +1,52 @@
matrix-synapse-py3 (1.128.0) stable; urgency=medium

  * New Synapse release 1.128.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Apr 2025 14:09:54 +0100

matrix-synapse-py3 (1.128.0~rc1) stable; urgency=medium

  * Update Poetry to 2.1.1.
  * New synapse release 1.128.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Apr 2025 14:35:33 +0000

matrix-synapse-py3 (1.127.1) stable; urgency=medium

  * New Synapse release 1.127.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Mar 2025 21:07:31 +0000

matrix-synapse-py3 (1.127.0) stable; urgency=medium

  * New Synapse release 1.127.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Mar 2025 12:04:15 +0000

matrix-synapse-py3 (1.127.0~rc1) stable; urgency=medium

  * New Synapse release 1.127.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Mar 2025 13:30:05 +0000

matrix-synapse-py3 (1.126.0) stable; urgency=medium

  * New Synapse release 1.126.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Mar 2025 13:11:29 +0000

matrix-synapse-py3 (1.126.0~rc3) stable; urgency=medium

  * New Synapse release 1.126.0rc3.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 07 Mar 2025 15:45:05 +0000

matrix-synapse-py3 (1.126.0~rc2) stable; urgency=medium

  * New Synapse release 1.126.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 05 Mar 2025 14:29:12 +0000

matrix-synapse-py3 (1.126.0~rc1) stable; urgency=medium

  * New Synapse release 1.126.0rc1.

@@ -22,7 +22,7 @@
|
||||
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
ARG PYTHON_VERSION=3.12
|
||||
ARG POETRY_VERSION=1.8.3
|
||||
ARG POETRY_VERSION=2.1.1
|
||||
|
||||
###
|
||||
### Stage 0: generate requirements.txt
|
||||
@@ -56,7 +56,7 @@ ENV UV_LINK_MODE=copy
|
||||
ARG POETRY_VERSION
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
|
||||
uvx --with poetry-plugin-export==1.8.0 \
|
||||
uvx --with poetry-plugin-export==1.9.0 \
|
||||
poetry@${POETRY_VERSION} export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
|
||||
else \
|
||||
touch /synapse/requirements.txt; \
|
||||
@@ -134,7 +134,6 @@ RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && \
|
||||
apt-get install -y --no-install-recommends rsync && \
|
||||
apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
|
||||
curl \
|
||||
gosu \
|
||||
@@ -148,14 +147,10 @@ RUN \
|
||||
for arch in arm64 amd64; do \
|
||||
mkdir -p /tmp/debs-${arch} && \
|
||||
cd /tmp/debs-${arch} && \
|
||||
apt-get download $(sed "s/$/:${arch}/" /tmp/pkg-list); \
|
||||
apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \
|
||||
done
|
||||
|
||||
# Extract the debs for each architecture
|
||||
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
|
||||
# libraries to the right place, else the `COPY` won't work.
|
||||
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
|
||||
# already present in the runtime image.
|
||||
RUN \
|
||||
for arch in arm64 amd64; do \
|
||||
mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \
|
||||
@@ -165,8 +160,6 @@ RUN \
|
||||
dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \
|
||||
dpkg --extract $deb /install-${arch}; \
|
||||
done; \
|
||||
rsync -avr /install-${arch}/lib/ /install-${arch}/usr/lib; \
|
||||
rm -rf /install-${arch}/lib /install-${arch}/lib64; \
|
||||
done
|
||||
|
||||
|
||||
@@ -183,7 +176,14 @@ LABEL org.opencontainers.image.documentation='https://github.com/element-hq/syna
|
||||
LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
|
||||
LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
|
||||
|
||||
COPY --from=runtime-deps /install-${TARGETARCH} /
|
||||
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
|
||||
# libraries to the right place, else the `COPY` won't work.
|
||||
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
|
||||
# already present in the runtime image.
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr
|
||||
COPY --from=runtime-deps /install-${TARGETARCH}/var /var
|
||||
COPY --from=builder /install /usr/local
|
||||
COPY ./docker/start.py /start.py
|
||||
COPY ./docker/conf /conf
|
||||
|
||||
@@ -2,18 +2,38 @@
|
||||
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
ARG PYTHON_VERSION=3.12
|
||||
|
||||
# first of all, we create a base image with an nginx which we can copy into the
|
||||
# first of all, we create a base image with dependencies which we can copy into the
|
||||
# target image. For repeated rebuilds, this is much faster than apt installing
|
||||
# each time.
|
||||
|
||||
FROM docker.io/library/debian:bookworm-slim AS deps_base
|
||||
FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
|
||||
|
||||
# Tell apt to keep downloaded package files, as we're using cache mounts.
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
|
||||
redis-server nginx-light
|
||||
nginx-light
|
||||
|
||||
RUN \
|
||||
# remove default page
|
||||
rm /etc/nginx/sites-enabled/default && \
|
||||
# have nginx log to stderr/out
|
||||
ln -sf /dev/stdout /var/log/nginx/access.log && \
|
||||
ln -sf /dev/stderr /var/log/nginx/error.log
|
||||
|
||||
# --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
|
||||
# (mounted as --mount=type=cache) and the target directory.
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
|
||||
|
||||
RUN mkdir -p /uv/etc/supervisor/conf.d
|
||||
|
||||
# Similarly, a base to copy the redis server from.
|
||||
#
|
||||
@@ -21,31 +41,21 @@ FROM docker.io/library/debian:bookworm-slim AS deps_base
|
||||
# which makes it much easier to copy (but we need to make sure we use an image
|
||||
# based on the same debian version as the synapse image, to make sure we get
|
||||
# the expected version of libc.
|
||||
FROM docker.io/library/redis:7-bookworm AS redis_base
|
||||
FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
|
||||
|
||||
# now build the final image, based on the the regular Synapse docker image
|
||||
FROM $FROM
|
||||
|
||||
# Install supervisord with pip instead of apt, to avoid installing a second
|
||||
# copy of python.
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install supervisor~=4.2
|
||||
RUN mkdir -p /etc/supervisor/conf.d
|
||||
|
||||
# Copy over redis and nginx
|
||||
# Copy over dependencies
|
||||
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
|
||||
|
||||
COPY --from=deps_base /uv /
|
||||
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
|
||||
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
|
||||
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
|
||||
COPY --from=deps_base /etc/nginx /etc/nginx
|
||||
RUN rm /etc/nginx/sites-enabled/default
|
||||
RUN mkdir /var/log/nginx /var/lib/nginx
|
||||
RUN chown www-data /var/lib/nginx
|
||||
|
||||
# have nginx log to stderr/out
|
||||
RUN ln -sf /dev/stdout /var/log/nginx/access.log
|
||||
RUN ln -sf /dev/stderr /var/log/nginx/error.log
|
||||
COPY --from=deps_base /var/log/nginx /var/log/nginx
|
||||
# chown to allow non-root user to write to http-*-temp-path dirs
|
||||
COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
|
||||
|
||||
# Copy Synapse worker, nginx and supervisord configuration template files
|
||||
COPY ./docker/conf-workers/* /conf/
|
||||
@@ -64,4 +74,4 @@ FROM $FROM
|
||||
# Replace the healthcheck with one which checks *all* the workers. The script
|
||||
# is generated by configure_workers_and_start.py.
|
||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
||||
CMD /bin/sh /healthcheck.sh
|
||||
CMD ["/healthcheck.sh"]
|
||||
|
||||
@@ -9,6 +9,9 @@
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
# This is an intermediate image, to be built locally (not pulled from a registry).
|
||||
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
|
||||
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
|
||||
|
||||
FROM $FROM
|
||||
# First of all, we copy postgres server from the official postgres image,
|
||||
@@ -20,9 +23,9 @@ FROM $FROM
|
||||
# the same debian version as Synapse's docker image (so the versions of the
|
||||
# shared libraries match).
|
||||
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
|
||||
COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
|
||||
COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
|
||||
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
|
||||
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
|
||||
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
|
||||
COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
|
||||
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
|
||||
ENV PGDATA=/var/lib/postgresql/data
|
||||
|
||||
@@ -55,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
|
||||
|
||||
# Update the healthcheck to have a shorter check interval
|
||||
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
|
||||
CMD /bin/sh /healthcheck.sh
|
||||
CMD ["/healthcheck.sh"]
|
||||
|
||||
@@ -5,12 +5,12 @@
|
||||
set -e
|
||||
|
||||
echo "Complement Synapse launcher"
|
||||
echo " Args: $@"
|
||||
echo " Args: $*"
|
||||
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
|
||||
|
||||
function log {
|
||||
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
|
||||
echo "$d $@"
|
||||
d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
|
||||
echo "$d $*"
|
||||
}
|
||||
|
||||
# Set the server name of the homeserver
|
||||
@@ -103,12 +103,11 @@ fi
|
||||
# Note that both the key and certificate are in PEM format (not DER).
|
||||
|
||||
# First generate a configuration file to set up a Subject Alternative Name.
|
||||
cat > /conf/server.tls.conf <<EOF
|
||||
echo "\
|
||||
.include /etc/ssl/openssl.cnf
|
||||
|
||||
[SAN]
|
||||
subjectAltName=DNS:${SERVER_NAME}
|
||||
EOF
|
||||
subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
|
||||
|
||||
# Generate an RSA key
|
||||
openssl genrsa -out /conf/server.tls.key 2048
|
||||
@@ -123,12 +122,12 @@ openssl x509 -req -in /conf/server.tls.csr \
|
||||
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
|
||||
|
||||
# Assert that we have a Subject Alternative Name in the certificate.
|
||||
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
|
||||
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
|
||||
# (the test will exit with 1 here if there isn't a SAN in the certificate.)
|
||||
[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
|
||||
|
||||
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
|
||||
export SYNAPSE_TLS_KEY=/conf/server.tls.key
|
||||
|
||||
# Run the script that writes the necessary config files and starts supervisord, which in turn
|
||||
# starts everything else
|
||||
exec /configure_workers_and_start.py
|
||||
exec /configure_workers_and_start.py "$@"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/local/bin/python
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
@@ -376,9 +376,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
#
|
||||
# We use append mode in case the files have already been written to by something else
|
||||
# (for instance, as part of the instructions in a dockerfile).
|
||||
exists = os.path.isfile(dst)
|
||||
with open(dst, "a") as outfile:
|
||||
# In case the existing file doesn't end with a newline
|
||||
outfile.write("\n")
|
||||
if exists:
|
||||
outfile.write("\n")
|
||||
|
||||
outfile.write(rendered)
|
||||
|
||||
@@ -604,7 +606,7 @@ def generate_base_homeserver_config() -> None:
|
||||
# start.py already does this for us, so just call that.
|
||||
# note that this script is copied in in the official, monolith dockerfile
|
||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
||||
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
|
||||
subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
|
||||
|
||||
|
||||
def parse_worker_types(
|
||||
@@ -998,6 +1000,7 @@ def generate_worker_files(
|
||||
"/healthcheck.sh",
|
||||
healthcheck_urls=healthcheck_urls,
|
||||
)
|
||||
os.chmod("/healthcheck.sh", 0o755)
|
||||
|
||||
# Ensure the logging directory exists
|
||||
log_dir = data_dir + "/logs"
|
||||
|
||||
@@ -10,6 +10,9 @@
|
||||
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
|
||||
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
|
||||
# confusion due to to interleaving of the different processes.
|
||||
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
|
||||
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
|
||||
prefixer() {
|
||||
mawk -W interactive '{printf("%s | %s\n", ENVIRON["SUPERVISOR_PROCESS_NAME"], $0); fflush() }'
|
||||
}
|
||||
exec 1> >(prefixer)
|
||||
exec 2> >(prefixer >&2)
|
||||
exec "$@"
|
||||
|
||||
@@ -46,6 +46,14 @@ to any local media, and any locally-cached copies of remote media.

The media file itself (and any thumbnails) is not deleted from the server.

Since Synapse 1.128.0, hashes of uploaded media are tracked. If this media
is quarantined, Synapse will:

- Quarantine any media with a matching hash that has already been uploaded.
- Quarantine any future media.
- Quarantine any existing cached remote media.
- Quarantine any future remote media.

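The hash-matching behaviour listed above amounts to a lookup keyed on a content digest: quarantining one copy quarantines every other copy with identical bytes, past or future. The sketch below is an illustration of that idea only, not Synapse's implementation; the class, method names, and the choice of SHA-256 are assumptions made for the example.

```python
import hashlib
from typing import Set


class QuarantineIndexSketch:
    """Toy model of hash-based quarantine: any copy of a piece of media whose
    digest matches a quarantined hash is itself treated as quarantined."""

    def __init__(self) -> None:
        self._quarantined_hashes: Set[str] = set()  # hex digests (assumption)

    @staticmethod
    def digest(media_bytes: bytes) -> str:
        # SHA-256 is an assumption made for the example; any stable content
        # hash gives the same behaviour.
        return hashlib.sha256(media_bytes).hexdigest()

    def quarantine(self, media_bytes: bytes) -> None:
        # Quarantining one copy quarantines every copy with the same bytes,
        # including copies uploaded or cached later.
        self._quarantined_hashes.add(self.digest(media_bytes))

    def is_quarantined(self, media_bytes: bytes) -> bool:
        return self.digest(media_bytes) in self._quarantined_hashes


if __name__ == "__main__":
    index = QuarantineIndexSketch()
    index.quarantine(b"cat.png bytes")
    assert index.is_quarantined(b"cat.png bytes")      # same bytes, later upload
    assert not index.is_quarantined(b"dog.png bytes")  # different content
```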
## Quarantining media by ID

This API quarantines a single piece of local or remote media.

@@ -150,6 +150,28 @@ $ poetry shell
$ poetry install --extras all
```

If you want to go even further and remove the Poetry caches:

```shell
# Find your Poetry cache directory
# Docs: https://github.com/python-poetry/poetry/blob/main/docs/configuration.md#cache-directory
$ poetry config cache-dir

# Remove packages from all cached repositories
$ poetry cache clear --all .

# Go completely nuclear and clear out everything Poetry cache related
# including the wheel artifacts which is not covered by the above command
# (see https://github.com/python-poetry/poetry/issues/10304)
#
# This is necessary in order to rebuild or fetch new wheels. For example, if you update
# the `icu` library in on your system, you will need to rebuild the PyICU Python package
# in order to incorporate the correct dynamically linked library locations otherwise you
# will run into errors like: `ImportError: libicui18n.so.75: cannot open shared object file: No such file or directory`
$ rm -rf $(poetry config cache-dir)
```


## ...run a command in the `poetry` virtualenv?

Use `poetry run cmd args` when you need the python virtualenv context.
@@ -187,7 +209,7 @@ useful.
## ...add a new dependency?

Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- manually update `pyproject.toml`; then `poetry lock`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
  `--extras` and `--optional` flags in particular.

@@ -202,12 +224,12 @@ poetry remove packagename
```

ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
`poetry lock`. Include the updated `pyproject.toml` and `poetry.lock`
files in your commit.

## ...update the version range for an existing dependency?

Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`.
Best done by manually editing `pyproject.toml`, then `poetry lock`.
Include the updated `pyproject.toml` and `poetry.lock` in your commit.

## ...update a dependency in the locked environment?
@@ -233,7 +255,7 @@ poetry add packagename==1.2.3

# Get poetry to recompute the content-hash of pyproject.toml without changing
# the locked package versions.
poetry lock --no-update
poetry lock
```

Either way, include the updated `poetry.lock` file in your commit.
@@ -23,6 +23,7 @@ such as [Github][github-idp].
[auth0]: https://auth0.com/
[authentik]: https://goauthentik.io/
[lemonldap]: https://lemonldap-ng.org/
[pocket-id]: https://pocket-id.org/
[okta]: https://www.okta.com/
[dex-idp]: https://github.com/dexidp/dex
[keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
@@ -624,6 +625,32 @@ oidc_providers:

Note that the fields `client_id` and `client_secret` are taken from the CURL response above.

### Pocket ID

[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys.
1. Go to `OIDC Clients`
2. Click on `Add OIDC Client`
3. Add a name, for example `Synapse`
4. Add `"https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain
5. Click on `Save`
6. Note down your `Client ID` and `Client secret`, these will be used later

Synapse config:

```yaml
oidc_providers:
  - idp_id: pocket_id
    idp_name: Pocket ID
    issuer: "https://auth.example.org/" # Replace with your domain
    client_id: "your-client-id" # Replace with the "Client ID" you noted down before
    client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
```

### Shibboleth with OIDC Plugin

[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.
@@ -10,7 +10,7 @@ As an example, a SSO service may return the email address
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SAML mapping providers come into play.
where SSO mapping providers come into play.

SSO mapping providers are currently supported for OpenID and SAML SSO
configurations. Please see the details below for how to implement your own.

@@ -160,7 +160,7 @@ Using the following curl command:
```console
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
```
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
`<access-token>` - can be obtained in element by looking in All settings, clicking Help & About and down the bottom is:
Access Token:\<click to reveal\>

`<room-alias>` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org
@@ -3672,6 +3672,9 @@ Options for each entry include:
* `additional_authorization_parameters`: String to string dictionary that will be passed as
  additional parameters to the authorization grant URL.

* `passthrough_authorization_parameters`: List of parameters that will be passed through from the redirect endpoint
  to the authorization grant URL.

* `allow_existing_users`: set to true to allow a user logging in via OIDC to
  match a pre-existing account instead of failing. This could be used if
  switching from password logins to OIDC. Defaults to false.
@@ -3798,6 +3801,7 @@ oidc_providers:
    jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
    additional_authorization_parameters:
      acr_values: 2fa
    passthrough_authorization_parameters: ["login_hint"]
    skip_verification: true
    enable_registration: true
    user_mapping_provider:
@@ -4014,7 +4018,7 @@ This option has a number of sub-options. They are as follows:
* `include_content`: Clients requesting push notifications can either have the body of
  the message sent in the notification poke along with other details
  like the sender, or just the event ID and room ID (`event_id_only`).
  If clients choose the to have the body sent, this option controls whether the
  If clients choose to have the body sent, this option controls whether the
  notification request includes the content of the event (other details
  like the sender are still included). If `event_id_only` is enabled, it
  has no effect.
@@ -249,13 +249,14 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
    ^/_matrix/client/(r0|v3|unstable)/capabilities$
    ^/_matrix/client/(r0|v3|unstable)/notifications$
    ^/_synapse/admin/v1/rooms/

    # Encryption requests
    ^/_matrix/client/(r0|v3|unstable)/keys/query$
    ^/_matrix/client/(r0|v3|unstable)/keys/changes$
    ^/_matrix/client/(r0|v3|unstable)/keys/claim$
    ^/_matrix/client/(r0|v3|unstable)/room_keys/
    ^/_matrix/client/(r0|v3|unstable)/keys/upload/
    ^/_matrix/client/(r0|v3|unstable)/keys/upload$

    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
@@ -280,6 +281,7 @@ Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
    ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
    ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/

    # Account data requests
    ^/_matrix/client/(r0|v3|unstable)/.*/tags

351 poetry.lock generated
File diff suppressed because it is too large
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.126.0rc1"
|
||||
version = "1.128.0"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "AGPL-3.0-or-later"
|
||||
@@ -390,7 +390,7 @@ skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aa
|
||||
#
|
||||
# We temporarily pin Rust to 1.82.0 to work around
|
||||
# https://github.com/element-hq/synapse/issues/17988
|
||||
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal"
|
||||
before-all = "sh .ci/before_build_wheel.sh"
|
||||
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
|
||||
|
||||
# For some reason if we don't manually clean the build directory we
|
||||
|
||||
@@ -30,7 +30,7 @@ http = "1.1.0"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.17"
|
||||
mime = "0.3.17"
|
||||
pyo3 = { version = "0.23.2", features = [
|
||||
pyo3 = { version = "0.23.5", features = [
|
||||
"macros",
|
||||
"anyhow",
|
||||
"abi3",
|
||||
|
||||
@@ -592,7 +592,7 @@ def _wait_for_actions(gh_token: Optional[str]) -> None:
|
||||
if all(
|
||||
workflow["status"] != "in_progress" for workflow in resp["workflow_runs"]
|
||||
):
|
||||
success = (
|
||||
success = all(
|
||||
workflow["status"] == "completed" for workflow in resp["workflow_runs"]
|
||||
)
|
||||
if success:
|
||||
|
||||
@@ -128,6 +128,7 @@ BOOLEAN_COLUMNS = {
|
||||
"pushers": ["enabled"],
|
||||
"redactions": ["have_censored"],
|
||||
"remote_media_cache": ["authenticated"],
|
||||
"room_memberships": ["participant"],
|
||||
"room_stats_state": ["is_federatable"],
|
||||
"rooms": ["is_public", "has_auth_chain_index"],
|
||||
"sliding_sync_joined_rooms": ["is_encrypted"],
|
||||
@@ -194,7 +195,7 @@ IGNORED_TABLES = {
|
||||
# Porting the auto generated sequence in this table is non-trivial.
|
||||
# None of the entries in this list are mandatory for Synapse to keep working.
|
||||
# If state group disk space is an issue after the port, the
|
||||
# `delete_unreferenced_state_groups_bg_update` background task can be run again.
|
||||
# `mark_unreferenced_state_groups_for_deletion_bg_update` background task can be run again.
|
||||
"state_groups_pending_deletion",
|
||||
# We don't port these tables, as they're a faff and we can regenerate
|
||||
# them anyway.
|
||||
@@ -226,7 +227,7 @@ IGNORED_BACKGROUND_UPDATES = {
|
||||
# Reapplying this background update to the postgres database is unnecessary after
|
||||
# already having waited for the SQLite database to complete all running background
|
||||
# updates.
|
||||
"delete_unreferenced_state_groups_bg_update",
|
||||
"mark_unreferenced_state_groups_for_deletion_bg_update",
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
@@ -44,9 +45,11 @@ from synapse.api.errors import (
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
|
||||
from synapse.types import Requester, UserID, create_requester
|
||||
from synapse.util import json_decoder
|
||||
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
|
||||
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.rest.admin.experimental_features import ExperimentalFeature
|
||||
@@ -76,6 +79,61 @@ def scope_to_list(scope: str) -> List[str]:
|
||||
return scope.strip().split(" ")
|
||||
|
||||
|
||||
@dataclass
|
||||
class IntrospectionResult:
|
||||
_inner: IntrospectionToken
|
||||
|
||||
# when we retrieved this token,
|
||||
# in milliseconds since the Unix epoch
|
||||
retrieved_at_ms: int
|
||||
|
||||
def is_active(self, now_ms: int) -> bool:
|
||||
if not self._inner.get("active"):
|
||||
return False
|
||||
|
||||
expires_in = self._inner.get("expires_in")
|
||||
if expires_in is None:
|
||||
return True
|
||||
if not isinstance(expires_in, int):
|
||||
raise InvalidClientTokenError("token `expires_in` is not an int")
|
||||
|
||||
absolute_expiry_ms = expires_in * 1000 + self.retrieved_at_ms
|
||||
return now_ms < absolute_expiry_ms
|
||||
|
||||
def get_scope_list(self) -> List[str]:
|
||||
value = self._inner.get("scope")
|
||||
if not isinstance(value, str):
|
||||
return []
|
||||
return scope_to_list(value)
|
||||
|
||||
def get_sub(self) -> Optional[str]:
|
||||
value = self._inner.get("sub")
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
return value
|
||||
|
||||
def get_username(self) -> Optional[str]:
|
||||
value = self._inner.get("username")
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
return value
|
||||
|
||||
def get_name(self) -> Optional[str]:
|
||||
value = self._inner.get("name")
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
return value
|
||||
|
||||
def get_device_id(self) -> Optional[str]:
|
||||
value = self._inner.get("device_id")
|
||||
if value is not None and not isinstance(value, str):
|
||||
raise AuthError(
|
||||
500,
|
||||
"Invalid device ID in introspection result",
|
||||
)
|
||||
return value
|
||||
|
||||
|
||||
class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc]
|
||||
"""An implementation of the private_key_jwt client auth method that includes a kid header.
|
||||
|
||||
@@ -120,6 +178,34 @@ class MSC3861DelegatedAuth(BaseAuth):
        self._http_client = hs.get_proxied_http_client()
        self._hostname = hs.hostname
        self._admin_token: Callable[[], Optional[str]] = self._config.admin_token
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

        # # Token Introspection Cache
        # This remembers what users/devices are represented by which access tokens,
        # in order to reduce overall system load:
        # - on Synapse (as requests are relatively expensive)
        # - on the network
        # - on MAS
        #
        # Since there is no invalidation mechanism currently,
        # the entries expire after 2 minutes.
        # This does mean tokens can be treated as valid by Synapse
        # for longer than reality.
        #
        # Ideally, tokens should logically be invalidated in the following circumstances:
        # - If a session logout happens.
        #   In this case, MAS will delete the device within Synapse
        #   anyway and this is good enough as an invalidation.
        # - If the client refreshes their token in MAS.
        #   In this case, the device still exists and it's not the end of the world for
        #   the old access token to continue working for a short time.
        self._introspection_cache: ResponseCache[str] = ResponseCache(
            self._clock,
            "token_introspection",
            timeout_ms=120_000,
            # don't log because the keys are access tokens
            enable_logging=False,
        )

        self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata](
            self._load_metadata
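The cache policy described in the comment block above (remember successful introspections for two minutes, never cache failures, never log the access-token key) can be pictured with a small standalone sketch. This is not Synapse's `ResponseCache` API; the class and method names below are illustrative assumptions.

```python
import time
from typing import Awaitable, Callable, Dict, Tuple

CACHE_TTL_SECONDS = 120.0  # mirrors timeout_ms=120_000 above


class IntrospectionCacheSketch:
    """Illustrative model of the caching policy, not Synapse's ResponseCache."""

    def __init__(self) -> None:
        # token -> (monotonic expiry time, introspection result)
        self._entries: Dict[str, Tuple[float, dict]] = {}

    async def introspect(
        self, token: str, fetch: Callable[[str], Awaitable[dict]]
    ) -> dict:
        now = time.monotonic()
        cached = self._entries.get(token)
        if cached is not None and cached[0] > now:
            return cached[1]

        # A failing fetch raises and is deliberately *not* cached, so a
        # transient error at the introspection endpoint cannot poison the
        # cache for two minutes.
        result = await fetch(token)

        # Only successful responses are remembered. Note that the token is
        # used purely as a dict key and is never written to any log.
        self._entries[token] = (now + CACHE_TTL_SECONDS, result)
        return result
```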
@@ -193,7 +279,9 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
metadata = await self._issuer_metadata.get()
|
||||
return metadata.get("introspection_endpoint")
|
||||
|
||||
async def _introspect_token(self, token: str) -> IntrospectionToken:
|
||||
async def _introspect_token(
|
||||
self, token: str, cache_context: ResponseCacheContext[str]
|
||||
) -> IntrospectionResult:
|
||||
"""
|
||||
Send a token to the introspection endpoint and returns the introspection response
|
||||
|
||||
@@ -209,6 +297,8 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
Returns:
|
||||
The introspection response
|
||||
"""
|
||||
# By default, we shouldn't cache the result unless we know it's valid
|
||||
cache_context.should_cache = False
|
||||
introspection_endpoint = await self._introspection_endpoint()
|
||||
raw_headers: Dict[str, str] = {
|
||||
"Content-Type": "application/x-www-form-urlencoded",
|
||||
@@ -266,7 +356,11 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
"The introspection endpoint returned an invalid JSON response."
|
||||
)
|
||||
|
||||
return IntrospectionToken(**resp)
|
||||
# We had a valid response, so we can cache it
|
||||
cache_context.should_cache = True
|
||||
return IntrospectionResult(
|
||||
IntrospectionToken(**resp), retrieved_at_ms=self._clock.time_msec()
|
||||
)
|
||||
|
||||
async def is_server_admin(self, requester: Requester) -> bool:
|
||||
return "urn:synapse:admin:*" in requester.scope
|
||||
@@ -277,6 +371,55 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
"""Get a registered user's ID.
|
||||
|
||||
Args:
|
||||
request: An HTTP request with an access_token query parameter.
|
||||
allow_guest: If False, will raise an AuthError if the user making the
|
||||
request is a guest.
|
||||
allow_expired: If True, allow the request through even if the account
|
||||
is expired, or session token lifetime has ended. Note that
|
||||
/login will deliver access tokens regardless of expiration.
|
||||
|
||||
Returns:
|
||||
Resolves to the requester
|
||||
Raises:
|
||||
InvalidClientCredentialsError if no user by that token exists or the token
|
||||
is invalid.
|
||||
AuthError if access is denied for the user in the access token
|
||||
"""
|
||||
parent_span = active_span()
|
||||
with start_active_span("get_user_by_req"):
|
||||
requester = await self._wrapped_get_user_by_req(
|
||||
request, allow_guest, allow_expired, allow_locked
|
||||
)
|
||||
|
||||
if parent_span:
|
||||
if requester.authenticated_entity in self._force_tracing_for_users:
|
||||
# request tracing is enabled for this user, so we need to force it
|
||||
# tracing on for the parent span (which will be the servlet span).
|
||||
#
|
||||
# It's too late for the get_user_by_req span to inherit the setting,
|
||||
# so we also force it on for that.
|
||||
force_tracing()
|
||||
force_tracing(parent_span)
|
||||
parent_span.set_tag(
|
||||
"authenticated_entity", requester.authenticated_entity
|
||||
)
|
||||
parent_span.set_tag("user_id", requester.user.to_string())
|
||||
if requester.device_id is not None:
|
||||
parent_span.set_tag("device_id", requester.device_id)
|
||||
if requester.app_service is not None:
|
||||
parent_span.set_tag("appservice_id", requester.app_service.id)
|
||||
return requester
|
||||
|
||||
async def _wrapped_get_user_by_req(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
access_token = self.get_access_token_from_request(request)
|
||||
|
||||
@@ -344,7 +487,9 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
)
|
||||
|
||||
try:
|
||||
introspection_result = await self._introspect_token(token)
|
||||
introspection_result = await self._introspection_cache.wrap(
|
||||
token, self._introspect_token, token, cache_context=True
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to introspect token")
|
||||
raise SynapseError(503, "Unable to introspect the access token")
|
||||
@@ -353,11 +498,11 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
|
||||
# TODO: introspection verification should be more extensive, especially:
|
||||
# - verify the audience
|
||||
if not introspection_result.get("active"):
|
||||
if not introspection_result.is_active(self._clock.time_msec()):
|
||||
raise InvalidClientTokenError("Token is not active")
|
||||
|
||||
# Let's look at the scope
|
||||
scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
|
||||
scope: List[str] = introspection_result.get_scope_list()
|
||||
|
||||
# Determine type of user based on presence of particular scopes
|
||||
has_user_scope = SCOPE_MATRIX_API in scope
|
||||
@@ -367,7 +512,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
raise InvalidClientTokenError("No scope in token granting user rights")
|
||||
|
||||
# Match via the sub claim
|
||||
sub: Optional[str] = introspection_result.get("sub")
|
||||
sub: Optional[str] = introspection_result.get_sub()
|
||||
if sub is None:
|
||||
raise InvalidClientTokenError(
|
||||
"Invalid sub claim in the introspection result"
|
||||
@@ -381,7 +526,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
# or the external_id was never recorded
|
||||
|
||||
# TODO: claim mapping should be configurable
|
||||
username: Optional[str] = introspection_result.get("username")
|
||||
username: Optional[str] = introspection_result.get_username()
|
||||
if username is None or not isinstance(username, str):
|
||||
raise AuthError(
|
||||
500,
|
||||
@@ -399,7 +544,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
|
||||
# TODO: claim mapping should be configurable
|
||||
# If present, use the name claim as the displayname
|
||||
name: Optional[str] = introspection_result.get("name")
|
||||
name: Optional[str] = introspection_result.get_name()
|
||||
|
||||
await self.store.register_user(
|
||||
user_id=user_id.to_string(), create_profile_with_displayname=name
|
||||
@@ -414,15 +559,8 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
|
||||
# MAS 0.15+ will give us the device ID as an explicit value for compatibility sessions
|
||||
# If present, we get it from here, if not we get it in thee scope
|
||||
device_id = introspection_result.get("device_id")
|
||||
if device_id is not None:
|
||||
# We got the device ID explicitly, just sanity check that it's a string
|
||||
if not isinstance(device_id, str):
|
||||
raise AuthError(
|
||||
500,
|
||||
"Invalid device ID in introspection result",
|
||||
)
|
||||
else:
|
||||
device_id = introspection_result.get_device_id()
|
||||
if device_id is None:
|
||||
# Find device_ids in scope
|
||||
# We only allow a single device_id in the scope, so we find them all in the
|
||||
# scope list, and raise if there are more than one. The OIDC server should be
|
||||
|
||||
@@ -29,8 +29,13 @@ from typing import Final
# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
# Max/min size of ints in canonical JSON
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

# the "depth" field on events is limited to the same as what
# canonicaljson accepts
MAX_DEPTH = CANONICALJSON_MAX_INT

# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255
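The new bound ties `MAX_DEPTH` to the canonical JSON integer limit rather than to a 64-bit signed integer. 2**53 - 1 is the largest integer that an IEEE-754 double can represent exactly, which is why canonical JSON caps integers there; a quick check of that reasoning in Python (the helper function is a sketch, not Synapse's validator):

```python
# 2**53 is the first integer a 64-bit IEEE-754 double cannot distinguish from
# its neighbour, which is why canonical JSON caps integers at 2**53 - 1.
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT

assert float(CANONICALJSON_MAX_INT) == CANONICALJSON_MAX_INT  # still exact
assert float(2**53) == float(2**53 + 1)                       # precision is lost here


def is_canonicaljson_int(value: int) -> bool:
    """Range check mirroring the bound above (a sketch, not Synapse's validator)."""
    return CANONICALJSON_MIN_INT <= value <= CANONICALJSON_MAX_INT
```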
@@ -20,8 +20,7 @@
#
#

from collections import OrderedDict
from typing import Hashable, Optional, Tuple
from typing import Dict, Hashable, Optional, Tuple

from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RatelimitSettings
@@ -80,12 +79,14 @@ class Ratelimiter:
        self.store = store
        self._limiter_name = cfg.key

        # An ordered dictionary representing the token buckets tracked by this rate
        # A dictionary representing the token buckets tracked by this rate
        # limiter. Each entry maps a key of arbitrary type to a tuple representing:
        #   * The number of tokens currently in the bucket,
        #   * The time point when the bucket was last completely empty, and
        #   * The rate_hz (leak rate) of this particular bucket.
        self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()
        self.actions: Dict[Hashable, Tuple[float, float, float]] = {}

        self.clock.looping_call(self._prune_message_counts, 60 * 1000)

    def _get_key(
        self, requester: Optional[Requester], key: Optional[Hashable]
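The tuple layout described in that comment is essentially the whole state of the limiter: tokens used, a reference time, and the per-bucket leak rate. Below is a minimal, self-contained sketch of that leaky-bucket arithmetic; the class name and exact bookkeeping are illustrative assumptions (Synapse's real `Ratelimiter` additionally supports per-request overrides, pending actions, and per-user overrides loaded from the store, and it tracks the time the bucket was last completely empty rather than the time of the last update).

```python
import time
from typing import Dict, Hashable, Optional, Tuple


class LeakyBucketSketch:
    """Minimal model of the bucket arithmetic described above. Each key maps to
    (tokens in the bucket, time of the last update, leak rate in actions/sec)."""

    def __init__(self, rate_hz: float, burst_count: float) -> None:
        self.rate_hz = rate_hz          # how fast tokens drain out of the bucket
        self.burst_count = burst_count  # bucket capacity
        self.actions: Dict[Hashable, Tuple[float, float, float]] = {}

    def can_do_action(self, key: Hashable, now: Optional[float] = None) -> bool:
        now = time.monotonic() if now is None else now
        tokens, last_update, rate = self.actions.get(key, (0.0, now, self.rate_hz))

        # Tokens leak out continuously at rate_hz since the last update.
        tokens = max(0.0, tokens - rate * (now - last_update))

        if tokens + 1.0 > self.burst_count:
            # Bucket is full: the action is rate limited. Still record the
            # leaked state so the bucket keeps draining over time.
            self.actions[key] = (tokens, now, rate)
            return False

        self.actions[key] = (tokens + 1.0, now, rate)
        return True

    def prune(self, now: Optional[float] = None) -> None:
        """Drop buckets that have fully drained, mirroring _prune_message_counts."""
        now = time.monotonic() if now is None else now
        for key in list(self.actions.keys()):
            tokens, last_update, rate = self.actions[key]
            if tokens - rate * (now - last_update) <= 0.0:
                del self.actions[key]


if __name__ == "__main__":
    limiter = LeakyBucketSketch(rate_hz=1.0, burst_count=3.0)
    results = [limiter.can_do_action("user_a", now=0.0) for _ in range(4)]
    assert results == [True, True, True, False]      # burst of 3, then limited
    assert limiter.can_do_action("user_a", now=2.0)   # two tokens leaked out by t=2
```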
@@ -169,9 +170,6 @@ class Ratelimiter:
        rate_hz = rate_hz if rate_hz is not None else self.rate_hz
        burst_count = burst_count if burst_count is not None else self.burst_count

        # Remove any expired entries
        self._prune_message_counts(time_now_s)

        # Check if there is an existing count entry for this key
        action_count, time_start, _ = self._get_action_counts(key, time_now_s)

@@ -246,13 +244,12 @@ class Ratelimiter:
        action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
        self.actions[key] = (action_count + n_actions, time_start, rate_hz)

    def _prune_message_counts(self, time_now_s: float) -> None:
    def _prune_message_counts(self) -> None:
        """Remove message count entries that have not exceeded their defined
        rate_hz limit

        Args:
            time_now_s: The current time
        """
        time_now_s = self.clock.time()

        # We create a copy of the key list here as the dictionary is modified during
        # the loop
        for key in list(self.actions.keys()):
@@ -21,7 +21,7 @@
|
||||
#
|
||||
import logging
|
||||
import sys
|
||||
from typing import Dict, List
|
||||
from typing import Dict, List, cast
|
||||
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
@@ -51,8 +51,8 @@ from synapse.http.server import JsonResource, OptionsResource
|
||||
from synapse.logging.context import LoggingContext
|
||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest import ClientRestResource, admin
|
||||
from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
|
||||
from synapse.rest.health import HealthResource
|
||||
from synapse.rest.key.v2 import KeyResource
|
||||
from synapse.rest.synapse.client import build_synapse_client_resource_tree
|
||||
@@ -190,7 +190,11 @@ class GenericWorkerServer(HomeServer):
|
||||
|
||||
resources.update(build_synapse_client_resource_tree(self))
|
||||
resources["/.well-known"] = well_known_resource(self)
|
||||
|
||||
admin_res = resources.get("/_synapse/admin")
|
||||
if admin_res is not None:
|
||||
admin.register_servlets(self, cast(JsonResource, admin_res))
|
||||
else:
|
||||
resources["/_synapse/admin"] = AdminRestResource(self)
|
||||
elif name == "federation":
|
||||
resources[FEDERATION_PREFIX] = TransportLayerServer(self)
|
||||
elif name == "media":
|
||||
@@ -199,15 +203,21 @@ class GenericWorkerServer(HomeServer):
|
||||
|
||||
# We need to serve the admin servlets for media on the
|
||||
# worker.
|
||||
admin_resource = JsonResource(self, canonical_json=False)
|
||||
register_servlets_for_media_repo(self, admin_resource)
|
||||
admin_res = resources.get("/_synapse/admin")
|
||||
if admin_res is not None:
|
||||
register_servlets_for_media_repo(
|
||||
self, cast(JsonResource, admin_res)
|
||||
)
|
||||
else:
|
||||
admin_resource = JsonResource(self, canonical_json=False)
|
||||
register_servlets_for_media_repo(self, admin_resource)
|
||||
resources["/_synapse/admin"] = admin_resource
|
||||
|
||||
resources.update(
|
||||
{
|
||||
MEDIA_R0_PREFIX: media_repo,
|
||||
MEDIA_V3_PREFIX: media_repo,
|
||||
LEGACY_MEDIA_PREFIX: media_repo,
|
||||
"/_synapse/admin": admin_resource,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -356,6 +356,9 @@ def _parse_oidc_config_dict(
|
||||
additional_authorization_parameters=oidc_config.get(
|
||||
"additional_authorization_parameters", {}
|
||||
),
|
||||
passthrough_authorization_parameters=oidc_config.get(
|
||||
"passthrough_authorization_parameters", []
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -501,3 +504,6 @@ class OidcProviderConfig:
|
||||
|
||||
# Additional parameters that will be passed to the authorization grant URL
|
||||
additional_authorization_parameters: Mapping[str, str]
|
||||
|
||||
# Allow query parameters to the redirect endpoint that will be passed to the authorization grant URL
|
||||
passthrough_authorization_parameters: Collection[str]
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
|
||||
import abc
|
||||
import collections.abc
|
||||
import os
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
@@ -48,21 +47,21 @@ from synapse.synapse_rust.events import EventInternalMetadata
|
||||
from synapse.types import JsonDict, StrCollection
|
||||
from synapse.util.caches import intern_dict
|
||||
from synapse.util.frozenutils import freeze
|
||||
from synapse.util.stringutils import strtobool
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.events.builder import EventBuilder
|
||||
|
||||
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
|
||||
# bugs where we accidentally share e.g. signature dicts. However, converting a
|
||||
# dict to frozen_dicts is expensive.
|
||||
#
|
||||
# NOTE: This is overridden by the configuration by the Synapse worker apps, but
|
||||
# for the sake of tests, it is set here while it cannot be configured on the
|
||||
# homeserver object itself.
|
||||
|
||||
USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
|
||||
USE_FROZEN_DICTS = False
|
||||
"""
|
||||
Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
|
||||
bugs where we accidentally share e.g. signature dicts. However, converting a
|
||||
dict to frozen_dicts is expensive.
|
||||
|
||||
NOTE: This is overridden by the configuration by the Synapse worker apps, but
|
||||
for the sake of tests, it is set here because it cannot be configured on the
|
||||
homeserver object itself.
|
||||
"""
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
@@ -40,6 +40,8 @@ import attr
|
||||
from canonicaljson import encode_canonical_json
|
||||
|
||||
from synapse.api.constants import (
|
||||
CANONICALJSON_MAX_INT,
|
||||
CANONICALJSON_MIN_INT,
|
||||
MAX_PDU_SIZE,
|
||||
EventContentFields,
|
||||
EventTypes,
|
||||
@@ -61,9 +63,6 @@ SPLIT_FIELD_REGEX = re.compile(r"\\*\.")
|
||||
# Find escaped characters, e.g. those with a \ in front of them.
|
||||
ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)")
|
||||
|
||||
CANONICALJSON_MAX_INT = (2**53) - 1
|
||||
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
|
||||
|
||||
|
||||
# Module API callback that allows adding fields to the unsigned section of
|
||||
# events that are sent to clients.
|
||||
|
||||
@@ -86,9 +86,7 @@ class EventValidator:
|
||||
|
||||
# Depending on the room version, ensure the data is spec compliant JSON.
|
||||
if event.room_version.strict_canonicaljson:
|
||||
# Note that only the client controlled portion of the event is
|
||||
# checked, since we trust the portions of the event we created.
|
||||
validate_canonicaljson(event.content)
|
||||
validate_canonicaljson(event.get_pdu_json())
|
||||
|
||||
if event.type == EventTypes.Aliases:
|
||||
if "aliases" in event.content:
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Awaitable, Callable, Optional
|
||||
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Sequence
|
||||
|
||||
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
@@ -29,6 +29,7 @@ from synapse.crypto.event_signing import check_event_content_hash
|
||||
from synapse.crypto.keyring import Keyring
|
||||
from synapse.events import EventBase, make_event_from_dict
|
||||
from synapse.events.utils import prune_event, validate_canonicaljson
|
||||
from synapse.federation.units import filter_pdus_for_valid_depth
|
||||
from synapse.http.servlet import assert_params_in_dict
|
||||
from synapse.logging.opentracing import log_kv, trace
|
||||
from synapse.types import JsonDict, get_domain_from_id
|
||||
@@ -267,6 +268,15 @@ def _is_invite_via_3pid(event: EventBase) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def parse_events_from_pdu_json(
|
||||
pdus_json: Sequence[JsonDict], room_version: RoomVersion
|
||||
) -> List[EventBase]:
|
||||
return [
|
||||
event_from_pdu_json(pdu_json, room_version)
|
||||
for pdu_json in filter_pdus_for_valid_depth(pdus_json)
|
||||
]
|
||||
|
||||
|
||||
def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
|
||||
"""Construct an EventBase from an event json received over federation
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ from synapse.federation.federation_base import (
FederationBase,
InvalidEventSignatureError,
event_from_pdu_json,
parse_events_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.client import is_unknown_endpoint
@@ -349,7 +350,7 @@ class FederationClient(FederationBase):

room_version = await self.store.get_room_version(room_id)

pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
pdus = parse_events_from_pdu_json(transaction_data_pdus, room_version)

# Check signatures and hash of pdus, removing any from the list that fail checks
pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
@@ -393,9 +394,7 @@ class FederationClient(FederationBase):
transaction_data,
)

pdu_list: List[EventBase] = [
event_from_pdu_json(p, room_version) for p in transaction_data["pdus"]
]
pdu_list = parse_events_from_pdu_json(transaction_data["pdus"], room_version)

if pdu_list and pdu_list[0]:
pdu = pdu_list[0]
@@ -809,7 +808,7 @@ class FederationClient(FederationBase):

room_version = await self.store.get_room_version(room_id)

auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
auth_chain = parse_events_from_pdu_json(res["auth_chain"], room_version)

signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_chain, room_version=room_version
@@ -1529,9 +1528,7 @@ class FederationClient(FederationBase):

room_version = await self.store.get_room_version(room_id)

events = [
event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
events = parse_events_from_pdu_json(content.get("events", []), room_version)

signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, events, room_version=room_version

@@ -66,7 +66,7 @@ from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -469,7 +469,12 @@ class FederationServer(FederationBase):
logger.info("Ignoring PDU: %s", e)
continue

event = event_from_pdu_json(p, room_version)
try:
event = event_from_pdu_json(p, room_version)
except SynapseError as e:
logger.info("Ignoring PDU for failing to deserialize: %s", e)
continue

pdus_by_room.setdefault(room_id, []).append(event)

if event.origin_server_ts > newest_pdu_ts:
@@ -636,8 +641,8 @@ class FederationServer(FederationBase):
)

return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
"pdus": serialize_and_filter_pdus(pdus),
"auth_chain": serialize_and_filter_pdus(auth_chain),
}

async def on_pdu_request(
@@ -761,8 +766,8 @@ class FederationServer(FederationBase):
event_json = event.get_pdu_json(time_now)
resp = {
"event": event_json,
"state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
"state": serialize_and_filter_pdus(state_events, time_now),
"auth_chain": serialize_and_filter_pdus(auth_chain_events, time_now),
"members_omitted": caller_supports_partial_state,
}

@@ -1005,7 +1010,7 @@ class FederationServer(FederationBase):

time_now = self._clock.time_msec()
auth_pdus = await self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
res = {"auth_chain": serialize_and_filter_pdus(auth_pdus, time_now)}
return 200, res

async def on_query_client_keys(
@@ -1090,7 +1095,7 @@ class FederationServer(FederationBase):

time_now = self._clock.time_msec()

return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
return {"events": serialize_and_filter_pdus(missing_events, time_now)}

async def on_openid_userinfo(self, token: str) -> Optional[str]:
ts_now_ms = self._clock.time_msec()

@@ -24,10 +24,12 @@ server protocol.
"""

import logging
from typing import List, Optional
from typing import List, Optional, Sequence

import attr

from synapse.api.constants import CANONICALJSON_MAX_INT, CANONICALJSON_MIN_INT
from synapse.events import EventBase
from synapse.types import JsonDict

logger = logging.getLogger(__name__)
@@ -104,8 +106,28 @@ class Transaction:
result = {
"origin": self.origin,
"origin_server_ts": self.origin_server_ts,
"pdus": self.pdus,
"pdus": filter_pdus_for_valid_depth(self.pdus),
}
if self.edus:
result["edus"] = self.edus
return result


def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]:
filtered_pdus = []
for pdu in pdus:
# Drop PDUs that have a depth that is outside of the range allowed
# by canonical json.
if (
"depth" in pdu
and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
):
filtered_pdus.append(pdu)

return filtered_pdus


def serialize_and_filter_pdus(
pdus: Sequence[EventBase], time_now: Optional[int] = None
) -> List[JsonDict]:
return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus])

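As an aside, a minimal standalone sketch of the depth filtering these helpers perform; the integer bounds below are assumptions standing in for the `CANONICALJSON_MIN_INT`/`CANONICALJSON_MAX_INT` constants imported above:

```python
# Illustrative sketch only: canonical JSON restricts integers to a signed
# 53-bit range, so PDUs whose "depth" falls outside it are dropped.
CANONICALJSON_MIN_INT = -(2**53) + 1  # assumed bound
CANONICALJSON_MAX_INT = 2**53 - 1     # assumed bound


def filter_pdus_for_valid_depth(pdus):
    return [
        pdu
        for pdu in pdus
        if "depth" in pdu
        and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
    ]


pdus = [{"depth": 5}, {"depth": 2**60}, {"event_id": "$no_depth"}]
print(filter_pdus_for_valid_depth(pdus))  # [{'depth': 5}]
```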
@@ -191,18 +191,36 @@ class DelayedEventsHandler:
|
||||
|
||||
async def _handle_state_deltas(self, deltas: List[StateDelta]) -> None:
|
||||
"""
|
||||
Process current state deltas to cancel pending delayed events
|
||||
Process current state deltas to cancel other users' pending delayed events
|
||||
that target the same state.
|
||||
"""
|
||||
for delta in deltas:
|
||||
if delta.event_id is None:
|
||||
logger.debug(
|
||||
"Not handling delta for deleted state: %r %r",
|
||||
delta.event_type,
|
||||
delta.state_key,
|
||||
)
|
||||
continue
|
||||
|
||||
logger.debug(
|
||||
"Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
|
||||
)
|
||||
|
||||
event = await self._store.get_event(
|
||||
delta.event_id, check_room_id=delta.room_id
|
||||
)
|
||||
sender = UserID.from_string(event.sender)
|
||||
|
||||
next_send_ts = await self._store.cancel_delayed_state_events(
|
||||
room_id=delta.room_id,
|
||||
event_type=delta.event_type,
|
||||
state_key=delta.state_key,
|
||||
not_from_localpart=(
|
||||
sender.localpart
|
||||
if sender.domain == self._config.server.server_name
|
||||
else ""
|
||||
),
|
||||
)
|
||||
|
||||
if self._next_send_ts_changed(next_send_ts):
|
||||
|
||||
@@ -163,6 +163,8 @@ class DeviceWorkerHandler:
|
||||
raise errors.NotFoundError()
|
||||
|
||||
ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
|
||||
|
||||
device = dict(device)
|
||||
_update_device_from_client_ips(device, ips)
|
||||
|
||||
set_tag("device", str(device))
|
||||
|
||||
@@ -1462,6 +1462,12 @@ class EventCreationHandler:
|
||||
)
|
||||
return prev_event
|
||||
|
||||
if not event.is_state() and event.type in [
|
||||
EventTypes.Message,
|
||||
EventTypes.Encrypted,
|
||||
]:
|
||||
await self.store.set_room_participation(event.user_id, event.room_id)
|
||||
|
||||
if event.internal_metadata.is_out_of_band_membership():
|
||||
# the only sort of out-of-band-membership events we expect to see here are
|
||||
# invite rejections and rescinded knocks that we have generated ourselves.
|
||||
|
||||
@@ -467,6 +467,10 @@ class OidcProvider:
|
||||
|
||||
self._sso_handler.register_identity_provider(self)
|
||||
|
||||
self.passthrough_authorization_parameters = (
|
||||
provider.passthrough_authorization_parameters
|
||||
)
|
||||
|
||||
def _validate_metadata(self, m: OpenIDProviderMetadata) -> None:
|
||||
"""Verifies the provider metadata.
|
||||
|
||||
@@ -1005,7 +1009,6 @@ class OidcProvider:
|
||||
when everything is done (or None for UI Auth)
|
||||
ui_auth_session_id: The session ID of the ongoing UI Auth (or
|
||||
None if this is a login).
|
||||
|
||||
Returns:
|
||||
The redirect URL to the authorization endpoint.
|
||||
|
||||
@@ -1078,6 +1081,13 @@ class OidcProvider:
|
||||
)
|
||||
)
|
||||
|
||||
# add passthrough additional authorization parameters
|
||||
passthrough_authorization_parameters = self.passthrough_authorization_parameters
|
||||
for parameter in passthrough_authorization_parameters:
|
||||
parameter_value = parse_string(request, parameter)
|
||||
if parameter_value:
|
||||
additional_authorization_parameters.update({parameter: parameter_value})
|
||||
|
||||
authorization_endpoint = metadata.get("authorization_endpoint")
|
||||
return prepare_grant_uri(
|
||||
authorization_endpoint,
|
||||
|
||||
@@ -118,6 +118,9 @@ DEFAULT_MAX_TIMEOUT_MS = 20_000
|
||||
# Maximum allowed timeout_ms for download and thumbnail requests
|
||||
MAXIMUM_ALLOWED_MAX_TIMEOUT_MS = 60_000
|
||||
|
||||
# The ETag header value to use for immutable media. This can be anything.
|
||||
_IMMUTABLE_ETAG = "1"
|
||||
|
||||
|
||||
def respond_404(request: SynapseRequest) -> None:
|
||||
assert request.path is not None
|
||||
@@ -224,12 +227,7 @@ def add_file_headers(
|
||||
|
||||
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
|
||||
|
||||
# cache for at least a day.
|
||||
# XXX: we might want to turn this off for data we don't want to
|
||||
# recommend caching as it's sensitive or private - or at least
|
||||
# select private. don't bother setting Expires as all our
|
||||
# clients are smart enough to be happy with Cache-Control
|
||||
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
|
||||
_add_cache_headers(request)
|
||||
|
||||
if file_size is not None:
|
||||
request.setHeader(b"Content-Length", b"%d" % (file_size,))
|
||||
@@ -240,6 +238,26 @@ def add_file_headers(
|
||||
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
|
||||
|
||||
|
||||
def _add_cache_headers(request: Request) -> None:
|
||||
"""Adds the appropriate cache headers to the response"""
|
||||
|
||||
# Cache on the client for at least a day.
|
||||
#
|
||||
# We set this to "public,s-maxage=0,proxy-revalidate" to allow CDNs to cache
|
||||
# the media, so long as they "revalidate" the media on every request. By
|
||||
# revalidate, we mean send the request to Synapse with a `If-None-Match`
|
||||
# header, to which Synapse can either respond with a 304 if the user is
|
||||
# authenticated/authorized, or a 401/403 if they're not.
|
||||
request.setHeader(
|
||||
b"Cache-Control", b"public,max-age=86400,s-maxage=0,proxy-revalidate"
|
||||
)
|
||||
|
||||
# Set an ETag header to allow requesters to use it in requests to check if
|
||||
# the cache is still valid. Since media is immutable (though may be
|
||||
# deleted), we just set this to a constant.
|
||||
request.setHeader(b"ETag", _IMMUTABLE_ETAG)
|
||||
|
||||
|
||||
# separators as defined in RFC2616. SP and HT are handled separately.
|
||||
# see _can_encode_filename_as_token.
|
||||
_FILENAME_SEPARATOR_CHARS = {
|
||||
@@ -336,13 +354,15 @@ async def respond_with_multipart_responder(
|
||||
|
||||
from synapse.media.media_storage import MultipartFileConsumer
|
||||
|
||||
_add_cache_headers(request)
|
||||
|
||||
# note that currently the json_object is just {}, this will change when linked media
|
||||
# is implemented
|
||||
multipart_consumer = MultipartFileConsumer(
|
||||
clock,
|
||||
request,
|
||||
media_type,
|
||||
{},
|
||||
{}, # Note: if we change this we need to change the returned ETag.
|
||||
disposition,
|
||||
media_length,
|
||||
)
|
||||
@@ -419,6 +439,46 @@ async def respond_with_responder(
|
||||
finish_request(request)
|
||||
|
||||
|
||||
def respond_with_304(request: SynapseRequest) -> None:
|
||||
request.setResponseCode(304)
|
||||
|
||||
# could alternatively use request.notifyFinish() and flip a flag when
|
||||
# the Deferred fires, but since the flag is RIGHT THERE it seems like
|
||||
# a waste.
|
||||
if request._disconnected:
|
||||
logger.warning(
|
||||
"Not sending response to request %s, already disconnected.", request
|
||||
)
|
||||
return None
|
||||
|
||||
_add_cache_headers(request)
|
||||
|
||||
request.finish()
|
||||
|
||||
|
||||
def check_for_cached_entry_and_respond(request: SynapseRequest) -> bool:
|
||||
"""Check if the request has a conditional header that allows us to return a
|
||||
304 Not Modified response, and if it does, return a 304 response.
|
||||
|
||||
This handles clients and intermediary proxies caching media.
|
||||
This method assumes that the user has already been
|
||||
authorised to request the media.
|
||||
|
||||
Returns True if we have responded."""
|
||||
|
||||
# We've checked the user has access to the media, so we now check if it
|
||||
# is a "conditional request" and we can just return a `304 Not Modified`
|
||||
# response. Since media is immutable (though may be deleted), we just
|
||||
# check this is the expected constant.
|
||||
etag = request.getHeader("If-None-Match")
|
||||
if etag == _IMMUTABLE_ETAG:
|
||||
# Return a `304 Not modified`.
|
||||
respond_with_304(request)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
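For context, a hedged usage sketch (not part of the diff) of how a client or CDN would revalidate cached media against the constant ETag; the URL and token below are placeholders:

```python
import requests  # any HTTP client works; requests is used here for brevity

resp = requests.get(
    "https://synapse.example.com/_matrix/client/v1/media/download/example.com/abc123",
    headers={
        "Authorization": "Bearer <access_token>",  # placeholder credentials
        "If-None-Match": "1",  # the constant _IMMUTABLE_ETAG set above
    },
)

# 304 means the cached copy may be reused; 200 means the body was re-sent;
# 401/403 means the caller is not authorised and must not serve the cache.
print(resp.status_code)
```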
class Responder(ABC):
|
||||
"""Represents a response that can be streamed to the requester.
|
||||
|
||||
|
||||
@@ -52,13 +52,18 @@ from synapse.media._base import (
|
||||
FileInfo,
|
||||
Responder,
|
||||
ThumbnailInfo,
|
||||
check_for_cached_entry_and_respond,
|
||||
get_filename_from_headers,
|
||||
respond_404,
|
||||
respond_with_multipart_responder,
|
||||
respond_with_responder,
|
||||
)
|
||||
from synapse.media.filepath import MediaFilePaths
|
||||
from synapse.media.media_storage import MediaStorage
|
||||
from synapse.media.media_storage import (
|
||||
MediaStorage,
|
||||
SHA256TransparentIOReader,
|
||||
SHA256TransparentIOWriter,
|
||||
)
|
||||
from synapse.media.storage_provider import StorageProviderWrapper
|
||||
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
|
||||
from synapse.media.url_previewer import UrlPreviewer
|
||||
@@ -300,15 +305,26 @@ class MediaRepository:
|
||||
auth_user: The user_id of the uploader
|
||||
"""
|
||||
file_info = FileInfo(server_name=None, file_id=media_id)
|
||||
fname = await self.media_storage.store_file(content, file_info)
|
||||
sha256reader = SHA256TransparentIOReader(content)
|
||||
# The wrapped reader still exposes the full IO interface, as unknown attributes pass through to the source stream
|
||||
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
|
||||
sha256 = sha256reader.hexdigest()
|
||||
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
|
||||
logger.info("Stored local media in file %r", fname)
|
||||
|
||||
if should_quarantine:
|
||||
logger.warning(
|
||||
"Media has been automatically quarantined as it matched existing quarantined media"
|
||||
)
|
||||
|
||||
await self.store.update_local_media(
|
||||
media_id=media_id,
|
||||
media_type=media_type,
|
||||
upload_name=upload_name,
|
||||
media_length=content_length,
|
||||
user_id=auth_user,
|
||||
sha256=sha256,
|
||||
quarantined_by="system" if should_quarantine else None,
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -341,11 +357,19 @@ class MediaRepository:
|
||||
media_id = random_string(24)
|
||||
|
||||
file_info = FileInfo(server_name=None, file_id=media_id)
|
||||
|
||||
fname = await self.media_storage.store_file(content, file_info)
|
||||
# The wrapped reader still exposes the full IO interface, as unknown attributes pass through to the source stream
|
||||
sha256reader = SHA256TransparentIOReader(content)
|
||||
fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
|
||||
sha256 = sha256reader.hexdigest()
|
||||
should_quarantine = await self.store.get_is_hash_quarantined(sha256)
|
||||
|
||||
logger.info("Stored local media in file %r", fname)
|
||||
|
||||
if should_quarantine:
|
||||
logger.warning(
|
||||
"Media has been automatically quarantined as it matched existing quarantined media"
|
||||
)
|
||||
|
||||
await self.store.store_local_media(
|
||||
media_id=media_id,
|
||||
media_type=media_type,
|
||||
@@ -353,6 +377,9 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=content_length,
|
||||
user_id=auth_user,
|
||||
sha256=sha256,
|
||||
# TODO: Better name?
|
||||
quarantined_by="system" if should_quarantine else None,
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -459,6 +486,11 @@ class MediaRepository:
|
||||
|
||||
self.mark_recently_accessed(None, media_id)
|
||||
|
||||
# Once we've checked auth we can return early if the media is cached on
|
||||
# the client
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
return
|
||||
|
||||
media_type = media_info.media_type
|
||||
if not media_type:
|
||||
media_type = "application/octet-stream"
|
||||
@@ -538,6 +570,17 @@ class MediaRepository:
|
||||
allow_authenticated,
|
||||
)
|
||||
|
||||
# Check if the media is cached on the client, if so return 304. We need
|
||||
# to do this after we have fetched remote media, as we need it to do the
|
||||
# auth.
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
# We always need to use the responder.
|
||||
if responder:
|
||||
with responder:
|
||||
pass
|
||||
|
||||
return
|
||||
|
||||
# We deliberately stream the file outside the lock
|
||||
if responder and media_info:
|
||||
upload_name = name if name else media_info.upload_name
|
||||
@@ -739,11 +782,13 @@ class MediaRepository:
|
||||
file_info = FileInfo(server_name=server_name, file_id=file_id)
|
||||
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
sha256writer = SHA256TransparentIOWriter(f)
|
||||
try:
|
||||
length, headers = await self.client.download_media(
|
||||
server_name,
|
||||
media_id,
|
||||
output_stream=f,
|
||||
# The wrapped writer still exposes the full BinaryIO interface, as unknown attributes pass through to the source stream
|
||||
output_stream=sha256writer.wrap(),
|
||||
max_size=self.max_upload_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
@@ -808,6 +853,7 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=length,
|
||||
filesystem_id=file_id,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
logger.info("Stored remote media in file %r", fname)
|
||||
@@ -828,6 +874,7 @@ class MediaRepository:
|
||||
last_access_ts=time_now_ms,
|
||||
quarantined_by=None,
|
||||
authenticated=authenticated,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
async def _federation_download_remote_file(
|
||||
@@ -862,11 +909,13 @@ class MediaRepository:
|
||||
file_info = FileInfo(server_name=server_name, file_id=file_id)
|
||||
|
||||
async with self.media_storage.store_into_file(file_info) as (f, fname):
|
||||
sha256writer = SHA256TransparentIOWriter(f)
|
||||
try:
|
||||
res = await self.client.federation_download_media(
|
||||
server_name,
|
||||
media_id,
|
||||
output_stream=f,
|
||||
# The wrapped writer still exposes the full BinaryIO interface, as unknown attributes pass through to the source stream
|
||||
output_stream=sha256writer.wrap(),
|
||||
max_size=self.max_upload_size,
|
||||
max_timeout_ms=max_timeout_ms,
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
@@ -937,6 +986,7 @@ class MediaRepository:
|
||||
upload_name=upload_name,
|
||||
media_length=length,
|
||||
filesystem_id=file_id,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
logger.debug("Stored remote media in file %r", fname)
|
||||
@@ -957,6 +1007,7 @@ class MediaRepository:
|
||||
last_access_ts=time_now_ms,
|
||||
quarantined_by=None,
|
||||
authenticated=authenticated,
|
||||
sha256=sha256writer.hexdigest(),
|
||||
)
|
||||
|
||||
def _get_thumbnail_requirements(
|
||||
|
||||
@@ -19,6 +19,7 @@
#
#
import contextlib
import hashlib
import json
import logging
import os
@@ -70,6 +71,88 @@ logger = logging.getLogger(__name__)
CRLF = b"\r\n"


class SHA256TransparentIOWriter:
"""Will generate a SHA256 hash from a source stream transparently.

Args:
source: Source stream.
"""

def __init__(self, source: BinaryIO):
self._hash = hashlib.sha256()
self._source = source

def write(self, buffer: Union[bytes, bytearray]) -> int:
"""Wrapper for source.write()

Args:
buffer

Returns:
the value of source.write()
"""
res = self._source.write(buffer)
self._hash.update(buffer)
return res

def hexdigest(self) -> str:
"""The digest of the written or read value.

Returns:
The digest in hex format.
"""
return self._hash.hexdigest()

def wrap(self) -> BinaryIO:
# This class implements a subset of the IO interface and passes through everything else via __getattr__
return cast(BinaryIO, self)

# Passthrough any other calls
def __getattr__(self, attr_name: str) -> Any:
return getattr(self._source, attr_name)


class SHA256TransparentIOReader:
"""Will generate a SHA256 hash from a source stream transparently.

Args:
source: Source IO stream.
"""

def __init__(self, source: IO):
self._hash = hashlib.sha256()
self._source = source

def read(self, n: int = -1) -> bytes:
"""Wrapper for source.read()

Args:
n

Returns:
the value of source.read()
"""
bytes = self._source.read(n)
self._hash.update(bytes)
return bytes

def hexdigest(self) -> str:
"""The digest of the written or read value.

Returns:
The digest in hex format.
"""
return self._hash.hexdigest()

def wrap(self) -> IO:
# This class implements a subset of the IO interface and passes through everything else via __getattr__
return cast(IO, self)

# Passthrough any other calls
def __getattr__(self, attr_name: str) -> Any:
return getattr(self._source, attr_name)

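A small self-contained sketch (not from the diff) of how a passthrough hashing wrapper of this shape behaves; `HashingReader` below is an illustrative stand-in, not the class added above:

```python
import hashlib
import io
from typing import IO, Any


class HashingReader:
    """Illustrative stand-in for SHA256TransparentIOReader: hashes everything read."""

    def __init__(self, source: IO):
        self._hash = hashlib.sha256()
        self._source = source

    def read(self, n: int = -1) -> bytes:
        data = self._source.read(n)
        self._hash.update(data)
        return data

    def hexdigest(self) -> str:
        return self._hash.hexdigest()

    def __getattr__(self, attr_name: str) -> Any:
        # Anything else (seek, close, ...) is delegated to the wrapped stream.
        return getattr(self._source, attr_name)


reader = HashingReader(io.BytesIO(b"hello media"))
assert reader.read() == b"hello media"   # downstream code reads as usual
print(reader.hexdigest())                # digest computed as a side effect of reading
```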
class MediaStorage:
|
||||
"""Responsible for storing/fetching files from local sources.
|
||||
|
||||
@@ -107,7 +190,6 @@ class MediaStorage:
|
||||
Returns:
|
||||
the file path written to in the primary media store
|
||||
"""
|
||||
|
||||
async with self.store_into_file(file_info) as (f, fname):
|
||||
# Write to the main media repository
|
||||
await self.write_to_file(source, f)
|
||||
|
||||
@@ -34,6 +34,7 @@ from synapse.logging.opentracing import trace
|
||||
from synapse.media._base import (
|
||||
FileInfo,
|
||||
ThumbnailInfo,
|
||||
check_for_cached_entry_and_respond,
|
||||
respond_404,
|
||||
respond_with_file,
|
||||
respond_with_multipart_responder,
|
||||
@@ -294,6 +295,11 @@ class ThumbnailProvider:
|
||||
if media_info.authenticated:
|
||||
raise NotFoundError()
|
||||
|
||||
# Once we've checked auth we can return early if the media is cached on
|
||||
# the client
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
|
||||
await self._select_and_respond_with_thumbnail(
|
||||
request,
|
||||
@@ -334,6 +340,11 @@ class ThumbnailProvider:
|
||||
if media_info.authenticated:
|
||||
raise NotFoundError()
|
||||
|
||||
# Once we've checked auth we can return early if the media is cached on
|
||||
# the client
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
|
||||
for info in thumbnail_infos:
|
||||
t_w = info.width == desired_width
|
||||
@@ -431,6 +442,10 @@ class ThumbnailProvider:
|
||||
respond_404(request)
|
||||
return
|
||||
|
||||
# Check if the media is cached on the client, if so return 304.
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_remote_media_thumbnails(
|
||||
server_name, media_id
|
||||
)
|
||||
@@ -510,6 +525,10 @@ class ThumbnailProvider:
|
||||
if media_info.authenticated:
|
||||
raise NotFoundError()
|
||||
|
||||
# Check if the media is cached on the client, if so return 304.
|
||||
if check_for_cached_entry_and_respond(request):
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_remote_media_thumbnails(
|
||||
server_name, media_id
|
||||
)
|
||||
|
||||
@@ -66,7 +66,6 @@ from synapse.types import (
|
||||
from synapse.util.async_helpers import (
|
||||
timeout_deferred,
|
||||
)
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.stringutils import shortstr
|
||||
from synapse.visibility import filter_events_for_client
|
||||
|
||||
@@ -520,20 +519,22 @@ class Notifier:
|
||||
users = users or []
|
||||
rooms = rooms or []
|
||||
|
||||
with Measure(self.clock, "on_new_event"):
|
||||
user_streams: Set[_NotifierUserStream] = set()
|
||||
user_streams: Set[_NotifierUserStream] = set()
|
||||
|
||||
log_kv(
|
||||
{
|
||||
"waking_up_explicit_users": len(users),
|
||||
"waking_up_explicit_rooms": len(rooms),
|
||||
"users": shortstr(users),
|
||||
"rooms": shortstr(rooms),
|
||||
"stream": stream_key,
|
||||
"stream_id": new_token,
|
||||
}
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"waking_up_explicit_users": len(users),
|
||||
"waking_up_explicit_rooms": len(rooms),
|
||||
"users": shortstr(users),
|
||||
"rooms": shortstr(rooms),
|
||||
"stream": stream_key,
|
||||
"stream_id": new_token,
|
||||
}
|
||||
)
|
||||
|
||||
# Only calculate which user streams to wake up if there are, in fact,
|
||||
# any user streams registered.
|
||||
if self.user_to_user_stream or self.room_to_user_streams:
|
||||
for user in users:
|
||||
user_stream = self.user_to_user_stream.get(str(user))
|
||||
if user_stream is not None:
|
||||
@@ -565,25 +566,25 @@ class Notifier:
|
||||
# We resolve all these deferreds in one go so that we only need to
|
||||
# call `PreserveLoggingContext` once, as it has a bunch of overhead
|
||||
# (to calculate performance stats)
|
||||
with PreserveLoggingContext():
|
||||
for listener in listeners:
|
||||
listener.callback(current_token)
|
||||
if listeners:
|
||||
with PreserveLoggingContext():
|
||||
for listener in listeners:
|
||||
listener.callback(current_token)
|
||||
|
||||
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
|
||||
if user_streams:
|
||||
users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
|
||||
|
||||
self.notify_replication()
|
||||
self.notify_replication()
|
||||
|
||||
# Notify appservices.
|
||||
try:
|
||||
self.appservice_handler.notify_interested_services_ephemeral(
|
||||
stream_key,
|
||||
new_token,
|
||||
users,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Error notifying application services of ephemeral events"
|
||||
)
|
||||
# Notify appservices.
|
||||
try:
|
||||
self.appservice_handler.notify_interested_services_ephemeral(
|
||||
stream_key,
|
||||
new_token,
|
||||
users,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Error notifying application services of ephemeral events")
|
||||
|
||||
def on_new_replication_data(self) -> None:
|
||||
"""Used to inform replication listeners that something has happened
|
||||
|
||||
@@ -205,6 +205,12 @@ class HttpPusher(Pusher):
|
||||
if self._is_processing:
|
||||
return
|
||||
|
||||
# Check if we are trying, but failing, to contact the pusher. If so, we
|
||||
# don't try and start processing immediately and instead wait for the
|
||||
# retry loop to try again later (which is controlled by the timer).
|
||||
if self.failing_since and self.timed_call and self.timed_call.active():
|
||||
return
|
||||
|
||||
run_as_background_process("httppush.process", self._process)
|
||||
|
||||
async def _process(self) -> None:
|
||||
|
||||
@@ -275,7 +275,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
"""
|
||||
Register all the admin servlets.
|
||||
"""
|
||||
# Admin servlets aren't registered on workers.
|
||||
RoomRestServlet(hs).register(http_server)
|
||||
|
||||
# Admin servlets below may not work on workers.
|
||||
if hs.config.worker.worker_app is not None:
|
||||
return
|
||||
|
||||
@@ -283,7 +285,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
BlockRoomRestServlet(hs).register(http_server)
|
||||
ListRoomRestServlet(hs).register(http_server)
|
||||
RoomStateRestServlet(hs).register(http_server)
|
||||
RoomRestServlet(hs).register(http_server)
|
||||
RoomRestV2Servlet(hs).register(http_server)
|
||||
RoomMembersRestServlet(hs).register(http_server)
|
||||
DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)
|
||||
|
||||
@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet):
|
||||
self.hs = hs
|
||||
self.auth = hs.get_auth()
|
||||
handler = hs.get_device_handler()
|
||||
assert isinstance(handler, DeviceHandler)
|
||||
self.device_handler = handler
|
||||
self.auth_handler = hs.get_auth_handler()
|
||||
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
|
||||
self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
|
||||
self._is_main_process = hs.config.worker.worker_app is None
|
||||
|
||||
async def on_GET(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
@@ -179,6 +179,14 @@ class DeviceRestServlet(RestServlet):
|
||||
async def on_DELETE(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
# Can only be run on main process, as changes to device lists must
|
||||
# happen on main.
|
||||
if not self._is_main_process:
|
||||
error_message = "DELETE on /devices/ must be routed to main process"
|
||||
logger.error(error_message)
|
||||
raise SynapseError(500, error_message)
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
requester = await self.auth.get_user_by_req(request)
|
||||
|
||||
try:
|
||||
@@ -223,6 +231,14 @@ class DeviceRestServlet(RestServlet):
|
||||
async def on_PUT(
|
||||
self, request: SynapseRequest, device_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
# Can only be run on main process, as changes to device lists must
|
||||
# happen on main.
|
||||
if not self._is_main_process:
|
||||
error_message = "PUT on /devices/ must be routed to main process"
|
||||
logger.error(error_message)
|
||||
raise SynapseError(500, error_message)
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
|
||||
body = parse_and_validate_json_object_from_request(request, self.PutBody)
|
||||
@@ -585,9 +601,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
):
|
||||
DeleteDevicesRestServlet(hs).register(http_server)
|
||||
DevicesRestServlet(hs).register(http_server)
|
||||
DeviceRestServlet(hs).register(http_server)
|
||||
|
||||
if hs.config.worker.worker_app is None:
|
||||
DeviceRestServlet(hs).register(http_server)
|
||||
if hs.config.experimental.msc2697_enabled:
|
||||
DehydratedDeviceServlet(hs).register(http_server)
|
||||
ClaimDehydratedDeviceServlet(hs).register(http_server)
|
||||
|
||||
@@ -24,7 +24,7 @@ from collections import defaultdict
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
|
||||
|
||||
from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
|
||||
from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
|
||||
from synapse.api.errors import Codes, StoreError, SynapseError
|
||||
from synapse.api.filtering import FilterCollection
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
@@ -248,9 +248,8 @@ class SyncRestServlet(RestServlet):
|
||||
await self._server_notices_sender.on_user_syncing(user.to_string())
|
||||
|
||||
# ignore the presence update if the ratelimit is exceeded but do not pause the request
|
||||
try:
|
||||
await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
|
||||
except LimitExceededError:
|
||||
allowed, _ = await self._presence_per_user_limiter.can_do_action(requester)
|
||||
if not allowed:
|
||||
affect_presence = False
|
||||
logger.debug("User set_presence ratelimit exceeded; ignoring it.")
|
||||
else:
|
||||
|
||||
@@ -25,6 +25,7 @@ from typing import (
|
||||
TYPE_CHECKING,
|
||||
Collection,
|
||||
Mapping,
|
||||
Optional,
|
||||
Set,
|
||||
)
|
||||
|
||||
@@ -52,7 +53,7 @@ class PurgeEventsStorageController:
|
||||
)
|
||||
|
||||
self.stores.state.db_pool.updates.register_background_update_handler(
|
||||
_BackgroundUpdates.DELETE_UNREFERENCED_STATE_GROUPS_BG_UPDATE,
|
||||
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE,
|
||||
self._background_delete_unrefereneced_state_groups,
|
||||
)
|
||||
|
||||
@@ -92,6 +93,69 @@ class PurgeEventsStorageController:
|
||||
sg_to_delete
|
||||
)
|
||||
|
||||
async def _find_unreferenced_groups(
|
||||
self,
|
||||
state_groups: Collection[int],
|
||||
) -> Set[int]:
|
||||
"""Used when purging history to figure out which state groups can be
|
||||
deleted.
|
||||
|
||||
Args:
|
||||
state_groups: Set of state groups referenced by events
|
||||
that are going to be deleted.
|
||||
|
||||
Returns:
|
||||
The set of state groups that can be deleted.
|
||||
"""
|
||||
# Set of state groups that we have found to be referenced by events
|
||||
referenced_groups = set()
|
||||
|
||||
# Set of state groups we've already seen
|
||||
state_groups_seen = set(state_groups)
|
||||
|
||||
# Set of state groups to handle next.
|
||||
next_to_search = set(state_groups)
|
||||
while next_to_search:
|
||||
# We bound size of groups we're looking up at once, to stop the
|
||||
# SQL query getting too big
|
||||
if len(next_to_search) < 100:
|
||||
current_search = next_to_search
|
||||
next_to_search = set()
|
||||
else:
|
||||
current_search = set(itertools.islice(next_to_search, 100))
|
||||
next_to_search -= current_search
|
||||
|
||||
referenced = await self.stores.main.get_referenced_state_groups(
|
||||
current_search
|
||||
)
|
||||
referenced_groups |= referenced
|
||||
|
||||
# We don't continue iterating up the state group graphs for state
|
||||
# groups that are referenced.
|
||||
current_search -= referenced
|
||||
|
||||
edges = await self.stores.state.get_previous_state_groups(current_search)
|
||||
|
||||
prevs = set(edges.values())
|
||||
# We don't bother re-handling groups we've already seen
|
||||
prevs -= state_groups_seen
|
||||
next_to_search |= prevs
|
||||
state_groups_seen |= prevs
|
||||
|
||||
# We also check to see if anything referencing the state groups are
|
||||
# also unreferenced. This helps ensure that we delete unreferenced
|
||||
# state groups, if we don't then we will de-delta them when we
|
||||
# delete the other state groups leading to increased DB usage.
|
||||
next_edges = await self.stores.state.get_next_state_groups(current_search)
|
||||
nexts = set(next_edges.keys())
|
||||
nexts -= state_groups_seen
|
||||
next_to_search |= nexts
|
||||
state_groups_seen |= nexts
|
||||
|
||||
to_delete = state_groups_seen - referenced_groups
|
||||
|
||||
return to_delete
|
||||
|
||||
@wrap_as_background_process("_delete_state_groups_loop")
|
||||
async def _delete_state_groups_loop(self) -> None:
|
||||
"""Background task that deletes any state groups that may be pending
|
||||
@@ -160,47 +224,47 @@ class PurgeEventsStorageController:
|
||||
"""This background update will slowly delete any unreferenced state groups"""
|
||||
|
||||
last_checked_state_group = progress.get("last_checked_state_group")
|
||||
max_state_group = progress.get("max_state_group")
|
||||
|
||||
if last_checked_state_group is None or max_state_group is None:
|
||||
if last_checked_state_group is None:
|
||||
# This is the first run.
|
||||
last_checked_state_group = 0
|
||||
|
||||
max_state_group = await self.stores.state.db_pool.simple_select_one_onecol(
|
||||
table="state_groups",
|
||||
keyvalues={},
|
||||
retcol="MAX(id)",
|
||||
allow_none=True,
|
||||
desc="get_max_state_group",
|
||||
last_checked_state_group = (
|
||||
await self.stores.state.db_pool.simple_select_one_onecol(
|
||||
table="state_groups",
|
||||
keyvalues={},
|
||||
retcol="MAX(id)",
|
||||
allow_none=True,
|
||||
desc="get_max_state_group",
|
||||
)
|
||||
)
|
||||
if max_state_group is None:
|
||||
if last_checked_state_group is None:
|
||||
# There are no state groups so the background process is finished.
|
||||
await self.stores.state.db_pool.updates._end_background_update(
|
||||
_BackgroundUpdates.DELETE_UNREFERENCED_STATE_GROUPS_BG_UPDATE
|
||||
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE
|
||||
)
|
||||
return batch_size
|
||||
last_checked_state_group += 1
|
||||
|
||||
(
|
||||
last_checked_state_group,
|
||||
final_batch,
|
||||
) = await self._delete_unreferenced_state_groups_batch(
|
||||
last_checked_state_group, batch_size, max_state_group
|
||||
last_checked_state_group,
|
||||
batch_size,
|
||||
)
|
||||
|
||||
if not final_batch:
|
||||
# There are more state groups to check.
|
||||
progress = {
|
||||
"last_checked_state_group": last_checked_state_group,
|
||||
"max_state_group": max_state_group,
|
||||
}
|
||||
await self.stores.state.db_pool.updates._background_update_progress(
|
||||
_BackgroundUpdates.DELETE_UNREFERENCED_STATE_GROUPS_BG_UPDATE,
|
||||
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE,
|
||||
progress,
|
||||
)
|
||||
else:
|
||||
# This background process is finished.
|
||||
await self.stores.state.db_pool.updates._end_background_update(
|
||||
_BackgroundUpdates.DELETE_UNREFERENCED_STATE_GROUPS_BG_UPDATE
|
||||
_BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE
|
||||
)
|
||||
|
||||
return batch_size
|
||||
@@ -209,11 +273,9 @@ class PurgeEventsStorageController:
|
||||
self,
|
||||
last_checked_state_group: int,
|
||||
batch_size: int,
|
||||
max_state_group: int,
|
||||
) -> tuple[int, bool]:
|
||||
"""Looks for unreferenced state groups starting from the last state group
|
||||
checked, and any state groups which would become unreferenced if a state group
|
||||
was deleted, and marks them for deletion.
|
||||
checked and marks them for deletion.
|
||||
|
||||
Args:
|
||||
last_checked_state_group: The last state group that was checked.
|
||||
@@ -223,35 +285,15 @@ class PurgeEventsStorageController:
|
||||
(last_checked_state_group, final_batch)
|
||||
"""
|
||||
|
||||
# Look for state groups that can be cleaned up.
|
||||
def get_next_state_groups_txn(txn: LoggingTransaction) -> Set[int]:
|
||||
state_group_sql = "SELECT id FROM state_groups WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
|
||||
txn.execute(
|
||||
state_group_sql, (last_checked_state_group, max_state_group, batch_size)
|
||||
)
|
||||
|
||||
next_set = {row[0] for row in txn}
|
||||
|
||||
return next_set
|
||||
|
||||
next_set = await self.stores.state.db_pool.runInteraction(
|
||||
"get_next_state_groups", get_next_state_groups_txn
|
||||
# Find all state groups that can be deleted if any of the original set are deleted.
|
||||
(
|
||||
to_delete,
|
||||
last_checked_state_group,
|
||||
final_batch,
|
||||
) = await self._find_unreferenced_groups_for_background_deletion(
|
||||
last_checked_state_group, batch_size
|
||||
)
|
||||
|
||||
final_batch = False
|
||||
if len(next_set) < batch_size:
|
||||
final_batch = True
|
||||
else:
|
||||
last_checked_state_group = max(next_set)
|
||||
|
||||
if len(next_set) == 0:
|
||||
return last_checked_state_group, final_batch
|
||||
|
||||
# Find all state groups that can be deleted if the original set is deleted.
|
||||
# This set includes the original set, as well as any state groups that would
|
||||
# become unreferenced upon deleting the original set.
|
||||
to_delete = await self._find_unreferenced_groups(next_set)
|
||||
|
||||
if len(to_delete) == 0:
|
||||
return last_checked_state_group, final_batch
|
||||
|
||||
@@ -261,65 +303,146 @@ class PurgeEventsStorageController:
|
||||
|
||||
return last_checked_state_group, final_batch
|
||||
|
||||
async def _find_unreferenced_groups(
|
||||
async def _find_unreferenced_groups_for_background_deletion(
|
||||
self,
|
||||
state_groups: Collection[int],
|
||||
) -> Set[int]:
|
||||
"""Used when purging history to figure out which state groups can be
|
||||
deleted.
|
||||
last_checked_state_group: int,
|
||||
batch_size: int,
|
||||
) -> tuple[Set[int], int, bool]:
|
||||
"""Used when deleting unreferenced state groups in the background to figure out
|
||||
which state groups can be deleted.
|
||||
To avoid increased DB usage due to de-deltaing state groups, this returns only
state groups which are free-standing (i.e. share no edges with referenced groups) or
state groups whose edges do not lead to a future referenced group.
|
||||
|
||||
The following scenarios outline the possibilities based on state group data in
|
||||
the DB.
|
||||
|
||||
ie. Free standing -> state groups 1-N would be returned:
|
||||
SG_1
|
||||
|
|
||||
...
|
||||
|
|
||||
SG_N
|
||||
|
||||
ie. Previous reference -> state groups 2-N would be returned:
|
||||
SG_1 <- referenced by event
|
||||
|
|
||||
SG_2
|
||||
|
|
||||
...
|
||||
|
|
||||
SG_N
|
||||
|
||||
ie. Future reference -> none of the following state groups would be returned:
|
||||
SG_1
|
||||
|
|
||||
SG_2
|
||||
|
|
||||
...
|
||||
|
|
||||
SG_N <- referenced by event
|
||||
|
||||
Args:
|
||||
state_groups: Set of state groups referenced by events
|
||||
that are going to be deleted.
|
||||
last_checked_state_group: The last state group that was checked.
|
||||
batch_size: How many state groups to process in this iteration.
|
||||
|
||||
Returns:
|
||||
The set of state groups that can be deleted.
|
||||
(to_delete, last_checked_state_group, final_batch)
|
||||
"""
|
||||
# Set of events that we have found to be referenced by events
|
||||
referenced_groups = set()
|
||||
|
||||
# Set of state groups we've already seen
|
||||
state_groups_seen = set(state_groups)
|
||||
# If a state group's next edge is not pending deletion then we don't delete the state group.
|
||||
# If there is no next edge or the next edges are all marked for deletion, then delete
|
||||
# the state group.
|
||||
# This holds since we walk backwards from the latest state groups, ensuring that
|
||||
# we've already checked newer state groups for event references along the way.
|
||||
def get_next_state_groups_marked_for_deletion_txn(
|
||||
txn: LoggingTransaction,
|
||||
) -> tuple[dict[int, bool], dict[int, int]]:
|
||||
state_group_sql = """
|
||||
SELECT s.id, e.state_group, d.state_group
|
||||
FROM (
|
||||
SELECT id FROM state_groups
|
||||
WHERE id < ? ORDER BY id DESC LIMIT ?
|
||||
) as s
|
||||
LEFT JOIN state_group_edges AS e ON (s.id = e.prev_state_group)
|
||||
LEFT JOIN state_groups_pending_deletion AS d ON (e.state_group = d.state_group)
|
||||
"""
|
||||
txn.execute(state_group_sql, (last_checked_state_group, batch_size))
|
||||
|
||||
# Set of state groups to handle next.
|
||||
next_to_search = set(state_groups)
|
||||
while next_to_search:
|
||||
# We bound size of groups we're looking up at once, to stop the
|
||||
# SQL query getting too big
|
||||
if len(next_to_search) < 100:
|
||||
current_search = next_to_search
|
||||
next_to_search = set()
|
||||
else:
|
||||
current_search = set(itertools.islice(next_to_search, 100))
|
||||
next_to_search -= current_search
|
||||
# Mapping from state group to whether we should delete it.
|
||||
state_groups_to_deletion: dict[int, bool] = {}
|
||||
|
||||
referenced = await self.stores.main.get_referenced_state_groups(
|
||||
current_search
|
||||
)
|
||||
referenced_groups |= referenced
|
||||
# Mapping from state group to prev state group.
|
||||
state_groups_to_prev: dict[int, int] = {}
|
||||
|
||||
# We don't continue iterating up the state group graphs for state
|
||||
# groups that are referenced.
|
||||
current_search -= referenced
|
||||
for row in txn:
|
||||
state_group = row[0]
|
||||
next_edge = row[1]
|
||||
pending_deletion = row[2]
|
||||
|
||||
edges = await self.stores.state.get_previous_state_groups(current_search)
|
||||
if next_edge is not None:
|
||||
state_groups_to_prev[next_edge] = state_group
|
||||
|
||||
prevs = set(edges.values())
|
||||
# We don't bother re-handling groups we've already seen
|
||||
prevs -= state_groups_seen
|
||||
next_to_search |= prevs
|
||||
state_groups_seen |= prevs
|
||||
if next_edge is not None and not pending_deletion:
|
||||
# We have found an edge not marked for deletion.
|
||||
# Check previous results to see if this group is part of a chain
|
||||
# within this batch that qualifies for deletion.
|
||||
# ie. batch contains:
|
||||
# SG_1 -> SG_2 -> SG_3
|
||||
# If SG_3 is a candidate for deletion, then SG_2 & SG_1 should also
|
||||
# be, even though they have edges which may not be marked for
|
||||
# deletion.
|
||||
# This relies on SQL results being sorted in DESC order to work.
|
||||
next_is_deletion_candidate = state_groups_to_deletion.get(next_edge)
|
||||
if (
|
||||
next_is_deletion_candidate is None
|
||||
or not next_is_deletion_candidate
|
||||
):
|
||||
state_groups_to_deletion[state_group] = False
|
||||
else:
|
||||
state_groups_to_deletion.setdefault(state_group, True)
|
||||
else:
|
||||
# This state group may be a candidate for deletion
|
||||
state_groups_to_deletion.setdefault(state_group, True)
|
||||
|
||||
# We also check to see if anything referencing the state groups are
|
||||
# also unreferenced. This helps ensure that we delete unreferenced
|
||||
# state groups, if we don't then we will de-delta them when we
|
||||
# delete the other state groups leading to increased DB usage.
|
||||
next_edges = await self.stores.state.get_next_state_groups(current_search)
|
||||
nexts = set(next_edges.keys())
|
||||
nexts -= state_groups_seen
|
||||
next_to_search |= nexts
|
||||
state_groups_seen |= nexts
|
||||
return state_groups_to_deletion, state_groups_to_prev
|
||||
|
||||
to_delete = state_groups_seen - referenced_groups
|
||||
(
|
||||
state_groups_to_deletion,
|
||||
state_group_edges,
|
||||
) = await self.stores.state.db_pool.runInteraction(
|
||||
"get_next_state_groups_marked_for_deletion",
|
||||
get_next_state_groups_marked_for_deletion_txn,
|
||||
)
|
||||
deletion_candidates = {
|
||||
state_group
|
||||
for state_group, deletion in state_groups_to_deletion.items()
|
||||
if deletion
|
||||
}
|
||||
|
||||
return to_delete
|
||||
final_batch = False
|
||||
state_groups = state_groups_to_deletion.keys()
|
||||
if len(state_groups) < batch_size:
|
||||
final_batch = True
|
||||
else:
|
||||
last_checked_state_group = min(state_groups)
|
||||
|
||||
if len(state_groups) == 0:
|
||||
return set(), last_checked_state_group, final_batch
|
||||
|
||||
# Determine if any of the remaining state groups are directly referenced.
|
||||
referenced = await self.stores.main.get_referenced_state_groups(
|
||||
deletion_candidates
|
||||
)
|
||||
|
||||
# Remove state groups from deletion_candidates which are directly referenced or share a
|
||||
# future edge with a referenced state group within this batch.
|
||||
def filter_reference_chains(group: Optional[int]) -> None:
|
||||
while group is not None:
|
||||
deletion_candidates.discard(group)
|
||||
group = state_group_edges.get(group)
|
||||
|
||||
for referenced_group in referenced:
|
||||
filter_reference_chains(referenced_group)
|
||||
|
||||
return deletion_candidates, last_checked_state_group, final_batch
|
||||
|
||||
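To make the chain walk above concrete, a toy illustration with invented values: a batch `SG_1 -> SG_2 -> SG_3` where `SG_3` is referenced by an event, so every group on that chain is removed from the deletion candidates:

```python
from typing import Dict, Optional, Set

# Invented example data: maps a state group to its previous state group.
state_group_edges: Dict[int, int] = {2: 1, 3: 2}
deletion_candidates: Set[int] = {1, 2, 3}
referenced: Set[int] = {3}  # SG_3 is referenced by an event


def filter_reference_chains(group: Optional[int]) -> None:
    # Walk backwards along prev edges, discarding every group on the chain.
    while group is not None:
        deletion_candidates.discard(group)
        group = state_group_edges.get(group)


for referenced_group in referenced:
    filter_reference_chains(referenced_group)

print(deletion_candidates)  # set(): nothing on the referenced chain is deleted
```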
@@ -424,25 +424,37 @@ class DelayedEventsStore(SQLBaseStore):
|
||||
room_id: str,
|
||||
event_type: str,
|
||||
state_key: str,
|
||||
not_from_localpart: str,
|
||||
) -> Optional[Timestamp]:
|
||||
"""
|
||||
Cancels all matching delayed state events, i.e. removes them as long as they haven't been processed.
|
||||
|
||||
Args:
|
||||
room_id: The room ID to match against.
|
||||
event_type: The event type to match against.
|
||||
state_key: The state key to match against.
|
||||
not_from_localpart: The localpart of a user whose delayed events to not cancel.
|
||||
If set to the empty string, any users' delayed events may be cancelled.
|
||||
|
||||
Returns: The send time of the next delayed event to be sent, if any.
|
||||
"""
|
||||
|
||||
def cancel_delayed_state_events_txn(
|
||||
txn: LoggingTransaction,
|
||||
) -> Optional[Timestamp]:
|
||||
self.db_pool.simple_delete_txn(
|
||||
txn,
|
||||
table="delayed_events",
|
||||
keyvalues={
|
||||
"room_id": room_id,
|
||||
"event_type": event_type,
|
||||
"state_key": state_key,
|
||||
"is_processed": False,
|
||||
},
|
||||
txn.execute(
|
||||
"""
|
||||
DELETE FROM delayed_events
|
||||
WHERE room_id = ? AND event_type = ? AND state_key = ?
|
||||
AND user_localpart <> ?
|
||||
AND NOT is_processed
|
||||
""",
|
||||
(
|
||||
room_id,
|
||||
event_type,
|
||||
state_key,
|
||||
not_from_localpart,
|
||||
),
|
||||
)
|
||||
return self._get_next_delayed_event_send_ts_txn(txn)
|
||||
|
||||
|
||||
@@ -282,9 +282,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
|
||||
"count_devices_by_users", count_devices_by_users_txn, user_ids
|
||||
)
|
||||
|
||||
@cached()
|
||||
async def get_device(
|
||||
self, user_id: str, device_id: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
) -> Optional[Mapping[str, Any]]:
|
||||
"""Retrieve a device. Only returns devices that are not marked as
|
||||
hidden.
|
||||
|
||||
@@ -1817,6 +1818,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
},
|
||||
desc="store_device",
|
||||
)
|
||||
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
|
||||
|
||||
if not inserted:
|
||||
# if the device already exists, check if it's a real device, or
|
||||
# if the device ID is reserved by something else
|
||||
@@ -1882,6 +1885,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
values=device_ids,
|
||||
keyvalues={"user_id": user_id},
|
||||
)
|
||||
self._invalidate_cache_and_stream_bulk(
|
||||
txn, self.get_device, [(user_id, device_id) for device_id in device_ids]
|
||||
)
|
||||
|
||||
for batch in batch_iter(device_ids, 100):
|
||||
await self.db_pool.runInteraction(
|
||||
@@ -1915,6 +1921,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
updatevalues=updates,
|
||||
desc="update_device",
|
||||
)
|
||||
await self.invalidate_cache_and_stream("get_device", (user_id, device_id))
|
||||
|
||||
async def update_remote_device_list_cache_entry(
|
||||
self, user_id: str, device_id: str, content: JsonDict, stream_id: str
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -51,6 +52,8 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = (
|
||||
"media_repository_drop_index_wo_method_2"
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class LocalMedia:
|
||||
@@ -65,6 +68,7 @@ class LocalMedia:
|
||||
safe_from_quarantine: bool
|
||||
user_id: Optional[str]
|
||||
authenticated: Optional[bool]
|
||||
sha256: Optional[str]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -79,6 +83,7 @@ class RemoteMedia:
|
||||
last_access_ts: int
|
||||
quarantined_by: Optional[str]
|
||||
authenticated: Optional[bool]
|
||||
sha256: Optional[str]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -154,6 +159,26 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
|
||||
unique=True,
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="local_media_repository_sha256_idx",
|
||||
index_name="local_media_repository_sha256",
|
||||
table="local_media_repository",
|
||||
where_clause="sha256 IS NOT NULL",
|
||||
columns=[
|
||||
"sha256",
|
||||
],
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_index_update(
|
||||
update_name="remote_media_cache_sha256_idx",
|
||||
index_name="remote_media_cache_sha256",
|
||||
table="remote_media_cache",
|
||||
where_clause="sha256 IS NOT NULL",
|
||||
columns=[
|
||||
"sha256",
|
||||
],
|
||||
)
|
||||
|
||||
self.db_pool.updates.register_background_update_handler(
|
||||
BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2,
|
||||
self._drop_media_index_without_method,
|
||||
@@ -221,6 +246,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"safe_from_quarantine",
|
||||
"user_id",
|
||||
"authenticated",
|
||||
"sha256",
|
||||
),
|
||||
allow_none=True,
|
||||
desc="get_local_media",
|
||||
@@ -239,6 +265,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
safe_from_quarantine=row[7],
|
||||
user_id=row[8],
|
||||
authenticated=row[9],
|
||||
sha256=row[10],
|
||||
)
|
||||
|
||||
async def get_local_media_by_user_paginate(
|
||||
@@ -295,7 +322,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
quarantined_by,
|
||||
safe_from_quarantine,
|
||||
user_id,
|
||||
authenticated
|
||||
authenticated,
|
||||
sha256
|
||||
FROM local_media_repository
|
||||
WHERE user_id = ?
|
||||
ORDER BY {order_by_column} {order}, media_id ASC
|
||||
@@ -320,6 +348,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
safe_from_quarantine=bool(row[8]),
|
||||
user_id=row[9],
|
||||
authenticated=row[10],
|
||||
sha256=row[11],
|
||||
)
|
||||
for row in txn
|
||||
]
|
||||
@@ -449,6 +478,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
media_length: int,
|
||||
user_id: UserID,
|
||||
url_cache: Optional[str] = None,
|
||||
sha256: Optional[str] = None,
|
||||
quarantined_by: Optional[str] = None,
|
||||
) -> None:
|
||||
if self.hs.config.media.enable_authenticated_media:
|
||||
authenticated = True
|
||||
@@ -466,6 +497,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"user_id": user_id.to_string(),
|
||||
"url_cache": url_cache,
|
||||
"authenticated": authenticated,
|
||||
"sha256": sha256,
|
||||
"quarantined_by": quarantined_by,
|
||||
},
|
||||
desc="store_local_media",
|
||||
)
|
||||
@@ -477,20 +510,28 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
upload_name: Optional[str],
|
||||
media_length: int,
|
||||
user_id: UserID,
|
||||
sha256: str,
|
||||
url_cache: Optional[str] = None,
|
||||
quarantined_by: Optional[str] = None,
|
||||
) -> None:
|
||||
updatevalues = {
|
||||
"media_type": media_type,
|
||||
"upload_name": upload_name,
|
||||
"media_length": media_length,
|
||||
"url_cache": url_cache,
|
||||
"sha256": sha256,
|
||||
}
|
||||
|
||||
# This should never be un-set by this function.
|
||||
if quarantined_by is not None:
|
||||
updatevalues["quarantined_by"] = quarantined_by
|
||||
|
||||
await self.db_pool.simple_update_one(
|
||||
"local_media_repository",
|
||||
keyvalues={
|
||||
"user_id": user_id.to_string(),
|
||||
"media_id": media_id,
|
||||
},
|
||||
updatevalues={
|
||||
"media_type": media_type,
|
||||
"upload_name": upload_name,
|
||||
"media_length": media_length,
|
||||
"url_cache": url_cache,
|
||||
},
|
||||
updatevalues=updatevalues,
|
||||
desc="update_local_media",
|
||||
)
|
||||
|
||||
@@ -657,6 +698,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"last_access_ts",
|
||||
"quarantined_by",
|
||||
"authenticated",
|
||||
"sha256",
|
||||
),
|
||||
allow_none=True,
|
||||
desc="get_cached_remote_media",
|
||||
@@ -674,6 +716,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
last_access_ts=row[5],
|
||||
quarantined_by=row[6],
|
||||
authenticated=row[7],
|
||||
sha256=row[8],
|
||||
)
|
||||
|
||||
async def store_cached_remote_media(
|
||||
@@ -685,6 +728,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
time_now_ms: int,
|
||||
upload_name: Optional[str],
|
||||
filesystem_id: str,
|
||||
sha256: Optional[str],
|
||||
) -> None:
|
||||
if self.hs.config.media.enable_authenticated_media:
|
||||
authenticated = True
|
||||
@@ -703,6 +747,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"filesystem_id": filesystem_id,
|
||||
"last_access_ts": time_now_ms,
|
||||
"authenticated": authenticated,
|
||||
"sha256": sha256,
|
||||
},
|
||||
desc="store_cached_remote_media",
|
||||
)
|
||||
@@ -946,3 +991,46 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
await self.db_pool.runInteraction(
|
||||
"delete_url_cache_media", _delete_url_cache_media_txn
|
||||
)
|
||||
|
||||
async def get_is_hash_quarantined(self, sha256: str) -> bool:
|
||||
"""Get whether a specific sha256 hash digest matches any quarantined media.
|
||||
|
||||
Returns:
|
||||
True if the given hash matches any quarantined media, False otherwise.
|
||||
"""
|
||||

# If we don't have the index yet, performance tanks, so we return False.
# In the background updates, remote_media_cache_sha256_idx is created
# after local_media_repository_sha256_idx, which is why we only need to
# check for the completion of the former.
if not await self.db_pool.updates.has_completed_background_update(
"remote_media_cache_sha256_idx"
):
return False

def get_matching_media_txn(
txn: LoggingTransaction, table: str, sha256: str
) -> bool:
# Return on first match
sql = """
SELECT 1
FROM local_media_repository
WHERE sha256 = ? AND quarantined_by IS NOT NULL

UNION ALL

SELECT 1
FROM remote_media_cache
WHERE sha256 = ? AND quarantined_by IS NOT NULL
LIMIT 1
"""
txn.execute(sql, (sha256, sha256))
row = txn.fetchone()
return row is not None

return await self.db_pool.runInteraction(
"get_matching_media_txn",
get_matching_media_txn,
"local_media_repository",
sha256,
)

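The `get_is_hash_quarantined` hunk above answers "does this hash match any quarantined media, local or remote?" with a single UNION ALL query that stops at the first hit. A minimal standalone sketch of that lookup, using sqlite3 and invented rows rather than Synapse's DatabasePool, purely to illustrate the query shape:

    import hashlib
    import sqlite3

    # Illustrative stand-in for the lookup above; table contents are made up.
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE local_media_repository (media_id TEXT, sha256 TEXT, quarantined_by TEXT)")
    conn.execute("CREATE TABLE remote_media_cache (media_origin TEXT, media_id TEXT, sha256 TEXT, quarantined_by TEXT)")

    bad_hash = hashlib.sha256(b"abuse").hexdigest()
    conn.execute("INSERT INTO local_media_repository VALUES ('mediaA', ?, '@admin:example.org')", (bad_hash,))

    def is_hash_quarantined(sha256: str) -> bool:
        # Mirrors the query in the diff: stop at the first match in either table.
        row = conn.execute(
            """
            SELECT 1 FROM local_media_repository WHERE sha256 = ? AND quarantined_by IS NOT NULL
            UNION ALL
            SELECT 1 FROM remote_media_cache WHERE sha256 = ? AND quarantined_by IS NOT NULL
            LIMIT 1
            """,
            (sha256, sha256),
        ).fetchone()
        return row is not None

    print(is_hash_quarantined(bad_hash))                                # True
    print(is_hash_quarantined(hashlib.sha256(b"cat.jpg").hexdigest()))  # False
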
@@ -759,17 +759,37 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
external_id: id on that system
user_id: complete mxid that it is mapped to
"""
self._invalidate_cache_and_stream(
txn, self.get_user_by_external_id, (auth_provider, external_id)
)

self.db_pool.simple_insert_txn(
# This INSERT ... ON CONFLICT DO NOTHING statement will cause a
# 'could not serialize access due to concurrent update'
# if the row is added concurrently by another transaction.
# This is exactly what we want, as it makes the transaction get retried
# in a new snapshot where we can check for a genuine conflict.
was_inserted = self.db_pool.simple_upsert_txn(
txn,
table="user_external_ids",
values={
"auth_provider": auth_provider,
"external_id": external_id,
"user_id": user_id,
},
keyvalues={"auth_provider": auth_provider, "external_id": external_id},
values={},
insertion_values={"user_id": user_id},
)

if not was_inserted:
existing_id = self.db_pool.simple_select_one_onecol_txn(
txn,
table="user_external_ids",
keyvalues={"auth_provider": auth_provider, "user_id": user_id},
retcol="external_id",
allow_none=True,
)

if existing_id != external_id:
raise ExternalIDReuseException(
f"{user_id!r} has external id {existing_id!r} for {auth_provider} but trying to add {external_id!r}"
)

async def remove_user_external_id(
self, auth_provider: str, external_id: str, user_id: str
) -> None:
@@ -789,6 +809,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
},
desc="remove_user_external_id",
)
await self.invalidate_cache_and_stream(
"get_user_by_external_id", (auth_provider, external_id)
)

async def replace_user_external_id(
self,
@@ -809,29 +832,20 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
ExternalIDReuseException if the new external_id could not be mapped.
"""

def _remove_user_external_ids_txn(
def _replace_user_external_id_txn(
txn: LoggingTransaction,
user_id: str,
) -> None:
"""Remove all mappings from external user ids to a mxid
If these mappings are not found, this method does nothing.

Args:
user_id: complete mxid that it is mapped to
"""

self.db_pool.simple_delete_txn(
txn,
table="user_external_ids",
keyvalues={"user_id": user_id},
)

def _replace_user_external_id_txn(
txn: LoggingTransaction,
) -> None:
_remove_user_external_ids_txn(txn, user_id)

for auth_provider, external_id in record_external_ids:
self._invalidate_cache_and_stream(
txn, self.get_user_by_external_id, (auth_provider, external_id)
)

self._record_user_external_id_txn(
txn,
auth_provider,
@@ -847,6 +861,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
except self.database_engine.module.IntegrityError:
raise ExternalIDReuseException()

@cached()
async def get_user_by_external_id(
self, auth_provider: str, external_id: str
) -> Optional[str]:

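The registration hunk swaps a plain insert for an upsert keyed on (auth_provider, external_id), so a concurrent duplicate no longer surfaces as an IntegrityError; instead the caller sees that nothing was inserted and can check whether the existing mapping actually conflicts. A rough self-contained sketch of that idea using sqlite3's ON CONFLICT DO NOTHING (the table name matches the diff; everything else here is illustrative, not Synapse's API, and the conflict check is simplified):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE user_external_ids ("
        " auth_provider TEXT, external_id TEXT, user_id TEXT,"
        " UNIQUE (auth_provider, external_id))"
    )

    class ExternalIDReuseException(Exception):
        pass

    def record_user_external_id(auth_provider: str, external_id: str, user_id: str) -> None:
        cur = conn.execute(
            "INSERT INTO user_external_ids (auth_provider, external_id, user_id)"
            " VALUES (?, ?, ?) ON CONFLICT (auth_provider, external_id) DO NOTHING",
            (auth_provider, external_id, user_id),
        )
        if cur.rowcount == 0:
            # Nothing was inserted: the mapping already exists. Only complain if it
            # points at a *different* user.
            (existing_user,) = conn.execute(
                "SELECT user_id FROM user_external_ids WHERE auth_provider = ? AND external_id = ?",
                (auth_provider, external_id),
            ).fetchone()
            if existing_user != user_id:
                raise ExternalIDReuseException(
                    f"{external_id!r} on {auth_provider} is already mapped to {existing_user!r}"
                )

    record_user_external_id("oidc", "alice", "@alice:example.org")
    record_user_external_id("oidc", "alice", "@alice:example.org")  # idempotent
    try:
        record_user_external_id("oidc", "alice", "@mallory:example.org")
    except ExternalIDReuseException as e:
        print("rejected:", e)
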
@@ -51,11 +51,15 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase
from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage._base import (
db_to_json,
make_in_list_sql_clause,
)
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_tuple_in_list_sql_clause,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.types import Cursor
@@ -1127,6 +1131,109 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):

return local_media_ids

def _quarantine_local_media_txn(
self,
txn: LoggingTransaction,
hashes: Set[str],
media_ids: Set[str],
quarantined_by: Optional[str],
) -> int:
"""Quarantine and unquarantine local media items.

Args:
txn (cursor)
hashes: A set of sha256 hashes for any media that should be quarantined
media_ids: A set of media IDs for any media that should be quarantined
quarantined_by: The ID of the user who initiated the quarantine request
If it is `None` media will be removed from quarantine
Returns:
The total number of media items quarantined
"""
total_media_quarantined = 0

# Effectively a legacy path, update any media that was explicitly named.
if media_ids:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "media_id", media_ids
)
sql = f"""
UPDATE local_media_repository
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""

if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"

txn.execute(sql, [quarantined_by] + sql_many_clause_args)
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

# Update any media that was identified via hash.
if hashes:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "sha256", hashes
)
sql = f"""
UPDATE local_media_repository
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""

if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"

txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

return total_media_quarantined

def _quarantine_remote_media_txn(
self,
txn: LoggingTransaction,
hashes: Set[str],
media: Set[Tuple[str, str]],
quarantined_by: Optional[str],
) -> int:
"""Quarantine and unquarantine remote items

Args:
txn (cursor)
hashes: A set of sha256 hashes for any media that should be quarantined
media: A set of tuples (media_origin, media_id) for any media that should be quarantined
quarantined_by: The ID of the user who initiated the quarantine request
If it is `None` media will be removed from quarantine
Returns:
The total number of media items quarantined
"""
total_media_quarantined = 0

if media:
sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
txn.database_engine,
("media_origin", "media_id"),
media,
)
sql = f"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE {sql_in_list_clause}"""

txn.execute(sql, [quarantined_by] + sql_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

if hashes:
sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
txn.database_engine, "sha256", hashes
)
sql = f"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

return total_media_quarantined

def _quarantine_media_txn(
self,
txn: LoggingTransaction,
@@ -1146,40 +1253,49 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
Returns:
The total number of media items quarantined
"""
hashes = set()
media_ids = set()
remote_media = set()

# Update all the tables to set the quarantined_by flag
sql = """
UPDATE local_media_repository
SET quarantined_by = ?
WHERE media_id = ?
"""

# set quarantine
if quarantined_by is not None:
sql += "AND safe_from_quarantine = FALSE"
txn.executemany(
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
# First, determine the hashes of the media we want to delete.
# We also want the media_ids for any media that lacks a hash.
if local_mxcs:
hash_sql_many_clause_sql, hash_sql_many_clause_args = (
make_in_list_sql_clause(txn.database_engine, "media_id", local_mxcs)
)
# remove from quarantine
else:
txn.executemany(
sql, [(quarantined_by, media_id) for media_id in local_mxcs]
hash_sql = f"SELECT sha256, media_id FROM local_media_repository WHERE {hash_sql_many_clause_sql}"
if quarantined_by is not None:
hash_sql += " AND safe_from_quarantine = FALSE"

txn.execute(hash_sql, hash_sql_many_clause_args)
for sha256, media_id in txn:
if sha256:
hashes.add(sha256)
else:
media_ids.add(media_id)

# Do the same for remote media
if remote_mxcs:
hash_sql_in_list_clause, hash_sql_args = make_tuple_in_list_sql_clause(
txn.database_engine,
("media_origin", "media_id"),
remote_mxcs,
)

# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0
hash_sql = f"SELECT sha256, media_origin, media_id FROM remote_media_cache WHERE {hash_sql_in_list_clause}"
txn.execute(hash_sql, hash_sql_args)
for sha256, media_origin, media_id in txn:
if sha256:
hashes.add(sha256)
else:
remote_media.add((media_origin, media_id))

txn.executemany(
"""
UPDATE remote_media_cache
SET quarantined_by = ?
WHERE media_origin = ? AND media_id = ?
""",
[(quarantined_by, origin, media_id) for origin, media_id in remote_mxcs],
count = self._quarantine_local_media_txn(txn, hashes, media_ids, quarantined_by)
count += self._quarantine_remote_media_txn(
txn, hashes, remote_media, quarantined_by
)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

return total_media_quarantined
return count

async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked.

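The rewritten `_quarantine_media_txn` above no longer updates row by row: it first resolves the requested media to content hashes where a `sha256` is recorded, keeps plain IDs only as a fallback, and then hands both sets to the two helper transactions. A small illustration of just that partitioning step and the dynamic IN-clause it feeds (plain Python with invented rows and a simplified stand-in for make_in_list_sql_clause, no Synapse imports):

    from typing import Iterable, List, Optional, Set, Tuple

    def partition_local_media(
        rows: Iterable[Tuple[Optional[str], str]]
    ) -> Tuple[Set[str], Set[str]]:
        """Split (sha256, media_id) rows into hashes and hash-less media IDs, as in the diff."""
        hashes: Set[str] = set()
        media_ids: Set[str] = set()
        for sha256, media_id in rows:
            if sha256:
                hashes.add(sha256)
            else:
                media_ids.add(media_id)
        return hashes, media_ids

    def in_clause(column: str, values: Set[str]) -> Tuple[str, List[str]]:
        # Simplified stand-in for make_in_list_sql_clause: "col IN (?, ?, ...)".
        placeholders = ", ".join("?" for _ in values)
        return f"{column} IN ({placeholders})", list(values)

    hashes, media_ids = partition_local_media(
        [("a1b2c3", "mediaA"), (None, "mediaB"), ("a1b2c3", "mediaC")]
    )
    clause, args = in_clause("sha256", hashes)
    print(hashes, media_ids)
    print(f"UPDATE local_media_repository SET quarantined_by = ? WHERE {clause}", args)
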
@@ -79,6 +79,7 @@ logger = logging.getLogger(__name__)

_MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update"
_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership"
_POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE = 1000


@attr.s(frozen=True, slots=True, auto_attribs=True)
@@ -1606,6 +1607,62 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
from_ts,
)

async def set_room_participation(self, user_id: str, room_id: str) -> None:
"""
Record the provided user as participating in the given room

Args:
user_id: the user ID of the user
room_id: ID of the room to set the participant in
"""

def _set_room_participation_txn(
txn: LoggingTransaction, user_id: str, room_id: str
) -> None:
sql = """
UPDATE room_memberships
SET participant = true
WHERE event_id IN (
SELECT event_id FROM local_current_membership
WHERE user_id = ? AND room_id = ?
)
AND NOT participant
"""
txn.execute(sql, (user_id, room_id))

await self.db_pool.runInteraction(
"_set_room_participation_txn", _set_room_participation_txn, user_id, room_id
)

async def get_room_participation(self, user_id: str, room_id: str) -> bool:
"""
Check whether a user is listed as a participant in a room

Args:
user_id: user ID of the user
room_id: ID of the room to check in
"""

def _get_room_participation_txn(
txn: LoggingTransaction, user_id: str, room_id: str
) -> bool:
sql = """
SELECT participant
FROM local_current_membership AS l
INNER JOIN room_memberships AS r USING (event_id)
WHERE l.user_id = ?
AND l.room_id = ?
"""
txn.execute(sql, (user_id, room_id))
res = txn.fetchone()
if res:
return res[0]
return False

return await self.db_pool.runInteraction(
"_get_room_participation_txn", _get_room_participation_txn, user_id, room_id
)


class RoomMemberBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -1636,6 +1693,93 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
columns=["user_id", "room_id"],
)

self.db_pool.updates.register_background_update_handler(
"populate_participant_bg_update", self._populate_participant
)

async def _populate_participant(self, progress: JsonDict, batch_size: int) -> int:
"""
Background update to populate column `participant` on `room_memberships` table

A 'participant' is someone who is currently joined to a room and has sent at least
one `m.room.message` or `m.room.encrypted` event.

This background update will set the `participant` column across all rows in
`room_memberships` based on the user's *current* join status, and if
they've *ever* sent a message or encrypted event. Therefore one should
never assume the `participant` column's value is based solely on whether
the user participated in a previous "session" (where a "session" is defined
as a period between the user joining and leaving). See
https://github.com/element-hq/synapse/pull/18068#discussion_r1931070291
for further detail.
"""
stream_token = progress.get("last_stream_token", None)

def _get_max_stream_token_txn(txn: LoggingTransaction) -> int:
sql = """
SELECT event_stream_ordering from room_memberships
ORDER BY event_stream_ordering DESC
LIMIT 1;
"""
txn.execute(sql)
res = txn.fetchone()
if not res or not res[0]:
return 0
return res[0]

def _background_populate_participant_txn(
txn: LoggingTransaction, stream_token: str
) -> None:
sql = """
UPDATE room_memberships
SET participant = True
FROM (
SELECT DISTINCT c.state_key, e.room_id
FROM current_state_events AS c
INNER JOIN events AS e ON c.room_id = e.room_id
WHERE c.membership = 'join'
AND c.state_key = e.sender
AND (
e.type = 'm.room.message'
OR e.type = 'm.room.encrypted'
)
) AS subquery
WHERE room_memberships.user_id = subquery.state_key
AND room_memberships.room_id = subquery.room_id
AND room_memberships.event_stream_ordering <= ?
AND room_memberships.event_stream_ordering > ?;
"""
batch = int(stream_token) - _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
txn.execute(sql, (stream_token, batch))

if stream_token is None:
stream_token = await self.db_pool.runInteraction(
"_get_max_stream_token", _get_max_stream_token_txn
)

if stream_token < 0:
await self.db_pool.updates._end_background_update(
"populate_participant_bg_update"
)
return _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE

await self.db_pool.runInteraction(
"_background_populate_participant_txn",
_background_populate_participant_txn,
stream_token,
)

progress["last_stream_token"] = (
stream_token - _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
)
await self.db_pool.runInteraction(
"populate_participant_bg_update",
self.db_pool.updates._background_update_progress_txn,
"populate_participant_bg_update",
progress,
)
return _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE

async def _background_add_membership_profile(
self, progress: JsonDict, batch_size: int
) -> int:

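The `_populate_participant` background update walks `room_memberships` from the largest `event_stream_ordering` downwards in windows of `_POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE`, persisting the lower bound as progress and stopping once the token goes negative. A compact sketch of just that windowing loop (the per-window callback stands in for the UPDATE in the diff; everything else here is illustrative):

    from typing import Callable

    BATCH_SIZE = 1000  # mirrors _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE

    def walk_membership_stream(
        max_stream_token: int, process_window: Callable[[int, int], None]
    ) -> None:
        """Process (lower, upper] windows from the newest row down to zero."""
        token = max_stream_token
        while token >= 0:
            lower = token - BATCH_SIZE
            # In the diff this is the UPDATE ... WHERE event_stream_ordering <= ? AND > ?
            process_window(lower, token)
            token = lower  # persisted as progress["last_stream_token"] in the real update

    walk_membership_stream(3500, lambda lower, upper: print(f"window ({lower}, {upper}]"))
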
@@ -1,7 +1,7 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
# Copyright (C) 2023, 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -61,6 +61,13 @@ class SlidingSyncStore(SQLBaseStore):
columns=("required_state_id",),
)

self.db_pool.updates.register_background_index_update(
update_name="sliding_sync_membership_snapshots_membership_event_id_idx",
index_name="sliding_sync_membership_snapshots_membership_event_id_idx",
table="sliding_sync_membership_snapshots",
columns=("membership_event_id",),
)

async def get_latest_bump_stamp_for_room(
self,
room_id: str,

@@ -48,6 +48,7 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
@@ -914,6 +915,12 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
) -> None:
# Delete all edges that reference a state group linked to room_id
logger.info("[purge] removing %s from state_group_edges", room_id)

if isinstance(self.database_engine, PostgresEngine):
# Disable statement timeouts for this transaction; purging rooms can
# take a while!
txn.execute("SET LOCAL statement_timeout = 0")

txn.execute(
"""
DELETE FROM state_group_edges AS sge WHERE sge.state_group IN (

@@ -19,7 +19,7 @@
#
#

SCHEMA_VERSION = 89 # remember to update the list below when updating
SCHEMA_VERSION = 91 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema

This should be incremented whenever the codebase changes its requirements on the
@@ -158,6 +158,9 @@ Changes in SCHEMA_VERSION = 88

Changes in SCHEMA_VERSION = 89
- Add `state_groups_pending_deletion` and `state_groups_persisting` tables.

Changes in SCHEMA_VERSION = 90
- Add a column `participant` to `room_memberships` table
- Add background update to delete unreferenced state groups.
"""

@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8901, 'sliding_sync_membership_snapshots_membership_event_id_idx', '{}');
@@ -0,0 +1,20 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

-- Add a column `participant` to `room_memberships` table to track whether a room member has sent
-- a `m.room.message` or `m.room.encrypted` event into a room they are a member of
ALTER TABLE room_memberships ADD COLUMN participant BOOLEAN DEFAULT FALSE;

-- Add a background update to populate `participant` column
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9001, 'populate_participant_bg_update', '{}');
28
synapse/storage/schema/main/delta/91/01_media_hash.sql
Normal file
@@ -0,0 +1,28 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

-- Store the SHA256 content hash of media files.
ALTER TABLE local_media_repository ADD COLUMN sha256 TEXT;
ALTER TABLE remote_media_cache ADD COLUMN sha256 TEXT;

-- Add background updates to handle creating the new indexes.
--
-- Note that the ordering of the update is not following the usual scheme. This
-- is because when upgrading from Synapse 1.127, this index is fairly important
-- to have up quickly, so that it doesn't tank performance, which is why it is
-- scheduled before other background updates in the 1.127 -> 1.128 upgrade
INSERT INTO
background_updates (ordering, update_name, progress_json)
VALUES
(8890, 'local_media_repository_sha256_idx', '{}'),
(8891, 'remote_media_cache_sha256_idx', '{}');
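The delta above only adds the `sha256` columns and schedules the two `*_sha256_idx` background updates; the indexes themselves are built later by Synapse's background index machinery, which this diff does not show. As a rough sqlite3 stand-in, this is the end state those updates are responsible for producing (index and table names follow the SQL above):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE local_media_repository (media_id TEXT, sha256 TEXT)")
    conn.execute("CREATE TABLE remote_media_cache (media_origin TEXT, media_id TEXT, sha256 TEXT)")

    # What the two scheduled background updates ultimately build, so that hash
    # lookups (e.g. get_is_hash_quarantined) do not have to scan the tables.
    conn.execute("CREATE INDEX local_media_repository_sha256_idx ON local_media_repository (sha256)")
    conn.execute("CREATE INDEX remote_media_cache_sha256_idx ON remote_media_cache (sha256)")

    print([r[0] for r in conn.execute("SELECT name FROM sqlite_master WHERE type = 'index'")])
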
@@ -13,4 +13,4 @@

-- Add a background update to delete any unreferenced state groups
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8902, 'delete_unreferenced_state_groups_bg_update', '{}');
(9002, 'mark_unreferenced_state_groups_for_deletion_bg_update', '{}');
@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.

-- Remove the old unreferenced state group deletion background update if it exists
DELETE FROM background_updates WHERE update_name = 'delete_unreferenced_state_groups_bg_update';
@@ -49,6 +49,6 @@ class _BackgroundUpdates:
"sliding_sync_membership_snapshots_fix_forgotten_column_bg_update"
)

DELETE_UNREFERENCED_STATE_GROUPS_BG_UPDATE = (
"delete_unreferenced_state_groups_bg_update"
MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = (
"mark_unreferenced_state_groups_for_deletion_bg_update"
)

@@ -101,7 +101,13 @@ class ResponseCache(Generic[KV]):
used rather than trying to compute a new response.
"""

def __init__(self, clock: Clock, name: str, timeout_ms: float = 0):
def __init__(
self,
clock: Clock,
name: str,
timeout_ms: float = 0,
enable_logging: bool = True,
):
self._result_cache: Dict[KV, ResponseCacheEntry] = {}

self.clock = clock
@@ -109,6 +115,7 @@ class ResponseCache(Generic[KV]):

self._name = name
self._metrics = register_cache("response_cache", name, self, resizable=False)
self._enable_logging = enable_logging

def size(self) -> int:
return len(self._result_cache)
@@ -246,9 +253,12 @@ class ResponseCache(Generic[KV]):
"""
entry = self._get(key)
if not entry:
logger.debug(
"[%s]: no cached result for [%s], calculating new one", self._name, key
)
if self._enable_logging:
logger.debug(
"[%s]: no cached result for [%s], calculating new one",
self._name,
key,
)
context = ResponseCacheContext(cache_key=key)
if cache_context:
kwargs["cache_context"] = context
@@ -269,12 +279,15 @@ class ResponseCache(Generic[KV]):
return await make_deferred_yieldable(entry.result.observe())

result = entry.result.observe()
if result.called:
logger.info("[%s]: using completed cached result for [%s]", self._name, key)
else:
logger.info(
"[%s]: using incomplete cached result for [%s]", self._name, key
)
if self._enable_logging:
if result.called:
logger.info(
"[%s]: using completed cached result for [%s]", self._name, key
)
else:
logger.info(
"[%s]: using incomplete cached result for [%s]", self._name, key
)

span_context = entry.opentracing_span_context
with start_active_span_follows_from(

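The ResponseCache change is purely about logging: the existing debug/info lines are kept, but gated behind a new `enable_logging` constructor flag so very hot caches can opt out of per-request log lines. A tiny standalone sketch of the same pattern (a simplified cache, not Synapse's ResponseCache):

    import logging

    logger = logging.getLogger(__name__)

    class TinyResponseCache:
        """Simplified stand-in showing the enable_logging gate from the diff."""

        def __init__(self, name: str, enable_logging: bool = True) -> None:
            self._name = name
            self._cache: dict = {}
            self._enable_logging = enable_logging

        def get(self, key):
            if key not in self._cache:
                if self._enable_logging:
                    logger.debug("[%s]: no cached result for [%s]", self._name, key)
                return None
            if self._enable_logging:
                logger.info("[%s]: using cached result for [%s]", self._name, key)
            return self._cache[key]

        def set(self, key, value) -> None:
            self._cache[key] = value

    cache = TinyResponseCache("sync", enable_logging=False)  # e.g. for a very hot cache
    cache.set("k", 42)
    print(cache.get("k"))
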
Some files were not shown because too many files have changed in this diff.