Compare commits
26 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6fa43cb0b4 | |
| | f778ac32c1 | |
| | 003fc725db | |
| | 934f99a694 | |
| | 78e8ec6161 | |
| | f59419377d | |
| | a3b34dfafd | |
| | cb82a4a687 | |
| | 0c0ece9612 | |
| | 46e3f6756c | |
| | dedd6e35e6 | |
| | a3c7b3ecb9 | |
| | bf594a28a8 | |
| | c757969597 | |
| | 4cb0eeabdf | |
| | 4d7826b006 | |
| | ab7e5a2b17 | |
| | 4c51247cb3 | |
| | 4dd82e581a | |
| | 6e69338abc | |
| | 79ea4bed33 | |
| | 9ef4ca173e | |
| | 24b38733df | |
| | 4602b56643 | |
| | 6c460b3eae | |
| | cd4f4223de | |
```diff
@@ -35,55 +35,46 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 # First calculate the various trial jobs.
 #
-# For PRs, we only run each type of test with the oldest and newest Python
-# version that's supported. The oldest version ensures we don't accidentally
-# introduce syntax or code that's too new, and the newest ensures we don't use
-# code that's been dropped in the latest supported Python version.
+# For PRs, we only run each type of test with the oldest Python version supported (which
+# is Python 3.10 right now)
 
 trial_sqlite_tests = [
     {
         "python-version": "3.10",
         "database": "sqlite",
         "extras": "all",
-    },
-    {
-        "python-version": "3.14",
-        "database": "sqlite",
-        "extras": "all",
-    },
+    }
 ]
 
 if not IS_PR:
     # Otherwise, check all supported Python versions.
     #
     # Avoiding running all of these versions on every PR saves on CI time.
     trial_sqlite_tests.extend(
         {
             "python-version": version,
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.11", "3.12", "3.13")
+        for version in ("3.11", "3.12", "3.13", "3.14")
     )
 
 # Only test postgres against the earliest and latest Python versions that we
 # support in order to save on CI time.
 trial_postgres_tests = [
     {
         "python-version": "3.10",
         "database": "postgres",
-        "postgres-version": "14",
+        "postgres-version": "13",
         "extras": "all",
-    },
-    {
-        "python-version": "3.14",
-        "database": "postgres",
-        "postgres-version": "17",
-        "extras": "all",
-    },
+    }
 ]
 
 # Ensure that Synapse passes unit tests even with no extra dependencies installed.
+if not IS_PR:
+    trial_postgres_tests.append(
+        {
+            "python-version": "3.14",
+            "database": "postgres",
+            "postgres-version": "17",
+            "extras": "all",
+        }
+    )
+
 trial_no_extra_tests = [
     {
         "python-version": "3.10",
```
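The job lists computed above are ultimately serialized as JSON so that the workflow can consume them with `fromJson`. A minimal sketch of that output step, assuming a `set_output` helper (the helper name and exact shape are illustrative, not necessarily the script's real code):

```python
import json
import os

def set_output(key: str, value: str) -> None:
    # Hypothetical helper: append key=value to the file GitHub Actions
    # exposes via $GITHUB_OUTPUT so later workflow steps can read it.
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={value}\n")

IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# PRs only test the oldest supported Python; full runs cover them all.
trial_sqlite_tests = [{"python-version": "3.10", "database": "sqlite", "extras": "all"}]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": v, "database": "sqlite", "extras": "all"}
        for v in ("3.11", "3.12", "3.13", "3.14")
    )

# Emit the matrix as one JSON blob; the workflow reads it with fromJson().
set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```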
```diff
@@ -16,23 +16,20 @@ export VIRTUALENV_NO_DOWNLOAD=1
 # to select the lowest possible versions, rather than resorting to this sed script.
 
 # Patch the project definitions in-place:
-# - `-E` use extended regex syntax.
-# - Don't modify the line that defines required Python versions.
-# - Replace all lower and tilde bounds with exact bounds.
-# - Replace all caret bounds with exact bounds.
-# - Delete all lines referring to psycopg2 - so no testing of postgres support.
+# - Replace all lower and tilde bounds with exact bounds
+# - Replace all caret bounds---but not the one that defines the supported Python version!
+# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
 # - Use pyopenssl 17.0, which is the oldest version that works with
 #   a `cryptography` compiled against OpenSSL 1.1.
 # - Omit systemd: we're not logging to journal here.
 
-sed -i -E '
-    /^\s*requires-python\s*=/b
-    s/[~>]=/==/g
-    s/\^/==/g
-    /psycopg2/d
-    s/pyOpenSSL\s*==\s*16\.0\.0"/pyOpenSSL==17.0.0"/
-    /systemd/d
-' pyproject.toml
+sed -i \
+   -e "s/[~>]=/==/g" \
+   -e '/^python = "^/!s/\^/==/g' \
+   -e "/psycopg2/d" \
+   -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
+   -e '/systemd/d' \
+   pyproject.toml
 
 echo "::group::Patched pyproject.toml"
 cat pyproject.toml
```
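As an aid to reading the sed invocations above, here is a rough Python equivalent of the newer script's substitutions. This is an illustrative sketch only; CI runs the sed command itself:

```python
import re

def pin_to_lowest(pyproject: str) -> str:
    """Rewrite dependency bounds so pip resolves the oldest allowed versions."""
    out = []
    for line in pyproject.splitlines():
        # Leave the supported-Python declaration untouched.
        if re.match(r"^\s*requires-python\s*=", line):
            out.append(line)
            continue
        # Drop psycopg2 (no postgres testing) and systemd (no journal logging).
        if "psycopg2" in line or "systemd" in line:
            continue
        # Turn lower/tilde/caret bounds into exact pins: >=1.2, ~=1.2, ^1.2 -> ==1.2
        line = re.sub(r"[~>]=", "==", line)
        line = line.replace("^", "==")
        out.append(line)
    return "\n".join(out)

print(pin_to_lowest('attrs = ">=21.0"\nrequires-python = ">=3.10"'))
```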
```diff
@@ -26,8 +26,3 @@ c4268e3da64f1abb5b31deaeb5769adb6510c0a7
 # Update black to 23.1.0 (https://github.com/matrix-org/synapse/pull/15103)
 9bb2eac71962970d02842bca441f4bcdbbf93a11
 
-# Use type hinting generics in standard collections (https://github.com/element-hq/synapse/pull/19046)
-fc244bb592aa481faf28214a2e2ce3bb4e95d990
-
-# Write union types as X | Y where possible (https://github.com/element-hq/synapse/pull/19111)
-fcac7e0282b074d4bd3414d1c9c181e9701875d9
```
```diff
@@ -31,7 +31,7 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
 
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Extract version from pyproject.toml
         # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -123,7 +123,7 @@ jobs:
         uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
 
       - name: Calculate docker image tag
-        uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
         with:
           images: ${{ matrix.repository }}
           flavor: |
```
```diff
@@ -13,7 +13,7 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -24,7 +24,7 @@ jobs:
           mdbook-version: '0.4.17'
 
       - name: Setup python
-        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
 
@@ -50,7 +50,7 @@ jobs:
     name: Check links in documentation
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Setup mdbook
         uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
 
@@ -50,7 +50,7 @@ jobs:
     needs:
       - pre
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -64,7 +64,7 @@ jobs:
         run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
 
       - name: Setup python
-        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
```
```diff
@@ -18,14 +18,14 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
           components: clippy, rustfmt
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
 
@@ -42,12 +42,12 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       # The dev dependencies aren't exposed in the wheel metadata (at least with current
       # poetry-core versions), so we install with poetry.
@@ -77,13 +77,13 @@ jobs:
         postgres-version: "14"
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.postgres-version }}
@@ -93,7 +93,7 @@ jobs:
             -e POSTGRES_PASSWORD=postgres \
             -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
             postgres:${{ matrix.postgres-version }}
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: pip install .[all,test]
@@ -152,13 +152,13 @@ jobs:
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Ensure sytest runs `pip install`
         # Delete the lockfile so sytest will `pip install` rather than `poetry install`
@@ -202,14 +202,14 @@ jobs:
 
     steps:
       - name: Check out synapse codebase
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse
 
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh
 
-      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -234,7 +234,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
```diff
@@ -16,8 +16,8 @@ jobs:
     name: "Check locked dependencies have sdists"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: '3.x'
       - run: pip install tomli
 
@@ -33,17 +33,17 @@ jobs:
       packages: write
     steps:
      - name: Checkout specific branch (debug build)
-       uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+       uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
-       uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+       uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
-       uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+       uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        if: github.event_name == 'push'
        with:
          ref: master
@@ -55,7 +55,7 @@ jobs:
         password: ${{ secrets.GITHUB_TOKEN }}
     - name: Work out labels for complement image
       id: meta
-      uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
+      uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
       with:
         images: ghcr.io/${{ github.repository }}/complement-synapse
         tags: |
```
```diff
@@ -27,8 +27,8 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - id: set-distros
@@ -55,7 +55,7 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: src
 
@@ -74,7 +74,7 @@ jobs:
             ${{ runner.os }}-buildx-
 
       - name: Set up python
-        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
 
@@ -132,9 +132,9 @@ jobs:
           os: "ubuntu-24.04-arm"
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           # setup-python@v4 doesn't impose a default python version. Need to use 3.x
           # here, because `python` on osx points to Python 2.7.
@@ -150,14 +150,12 @@ jobs:
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
         env:
           # The platforms that we build for are determined by the
           # `tool.cibuildwheel.skip` option in `pyproject.toml`.
 
-          # We skip testing wheels for the following platforms in CI:
+          # Skip testing for platforms which various libraries don't have wheels
+          # for, and so need extra build deps.
           #
           # pp3*-* (PyPy wheels) broke in CI (TODO: investigate).
           # musl: (TODO: investigate).
-          CIBW_TEST_SKIP: pp3*-* *musl*
+          # cp39-*: Python 3.9 is EOL.
+          # cp3??t-*: Free-threaded builds are not currently supported.
+          CIBW_TEST_SKIP: pp3*-* cp39-* cp3??t-* *i686* *musl*
 
       - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
@@ -170,8 +168,8 @@ jobs:
     if: ${{ !startsWith(github.ref, 'refs/pull/') }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.10"
```
```diff
@@ -14,8 +14,8 @@ jobs:
     name: Ensure Synapse config schema is valid
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - name: Install check-jsonschema
@@ -40,8 +40,8 @@ jobs:
     name: Ensure generated documentation is up-to-date
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - name: Install PyYAML
```
+44 −45

```diff
@@ -86,12 +86,12 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - name: Install Rust
        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
        with:
          toolchain: ${{ env.RUST_VERSION }}
-     - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+     - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
      - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
        with:
          python-version: "3.x"
@@ -106,18 +106,18 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: "3.x"
-     - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'"
+     - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors
 
   check-lockfile:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: "3.x"
      - run: .ci/scripts/check_lockfile.py
@@ -129,7 +129,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,13 +151,13 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -187,20 +187,19 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh
 
   lint-newsfile:
-    # Only run on pull_request events, targeting develop/release branches, and skip when the PR author is dependabot[bot].
-    if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }}
+    if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: "pip install 'towncrier>=18.6.0rc1'"
```
```diff
@@ -214,14 +213,14 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           components: clippy
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: cargo clippy -- -D warnings
 
@@ -233,14 +232,14 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: nightly-2025-04-23
           components: clippy
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: cargo clippy --all-features -- -D warnings
 
@@ -251,13 +250,13 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -287,7 +286,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -296,7 +295,7 @@ jobs:
           # `.rustfmt.toml`.
           toolchain: nightly-2025-04-23
           components: rustfmt
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: cargo fmt --check
 
@@ -307,8 +306,8 @@ jobs:
     needs: changes
     if: ${{ needs.changes.outputs.linting_readme == 'true' }}
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: "3.x"
      - run: "pip install rstcheck"
@@ -355,8 +354,8 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: "3.x"
      - id: get-matrix
```
```diff
@@ -376,7 +375,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
@@ -394,7 +393,7 @@ jobs:
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -432,13 +431,13 @@ jobs:
       - changes
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       # There aren't wheels for some of the older deps, so we need to install
       # their build dependencies
@@ -447,7 +446,7 @@ jobs:
           sudo apt-get -qq install build-essential libffi-dev python3-dev \
             libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
 
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: '3.10'
 
@@ -497,7 +496,7 @@ jobs:
         extras: ["all"]
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -547,7 +546,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
@@ -555,7 +554,7 @@ jobs:
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Run SyTest
         run: /bootstrap.sh synapse
@@ -594,7 +593,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -618,7 +617,7 @@ jobs:
       matrix:
         include:
           - python-version: "3.10"
-            postgres-version: "14"
+            postgres-version: "13"
 
           - python-version: "3.14"
             postgres-version: "17"
@@ -638,7 +637,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Add PostgreSQL apt repository
         # We need a version of pg_dump that can handle the version of
         # PostgreSQL being tested against. The Ubuntu package repository lags
```
```diff
@@ -693,7 +692,7 @@ jobs:
 
     steps:
       - name: Checkout synapse codebase
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse
 
@@ -701,12 +700,12 @@ jobs:
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh
 
-      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -729,13 +728,13 @@ jobs:
       - changes
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: cargo test
 
@@ -749,13 +748,13 @@ jobs:
       - changes
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: nightly-2022-12-01
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - run: cargo bench --no-run
 
@@ -22,7 +22,7 @@ jobs:
       # This field is case-sensitive.
       TARGET_STATUS: Needs info
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          # Only clone the script file we care about, instead of the whole repo.
          sparse-checkout: .ci/scripts/triage_labelled_issue.sh
 
@@ -43,13 +43,13 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -70,14 +70,14 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -117,13 +117,13 @@ jobs:
       - ${{ github.workspace }}:/src
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 
       - name: Install Rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
 
       - name: Patch dependencies
         # Note: The poetry commands want to create a virtualenv in /src/.venv/,
@@ -175,14 +175,14 @@ jobs:
 
     steps:
       - name: Run actions/checkout@v4 for synapse
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse
 
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh
 
-      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -217,7 +217,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
+1 −104

```diff
@@ -1,98 +1,4 @@
-# Synapse 1.143.0 (2025-11-25)
-
-## Dropping support for PostgreSQL 13
-
-In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
-support for PostgreSQL 13, as it is no longer supported upstream.
-This release of Synapse requires PostgreSQL 14+.
-
-No significant changes since 1.143.0rc2.
-
-
-# Synapse 1.143.0rc2 (2025-11-18)
-
-## Dropping support for PostgreSQL 13
-
-In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
-support for PostgreSQL 13, as it is no longer supported upstream.
-This release of Synapse requires PostgreSQL 14+.
-
-## Internal Changes
-
-- Fixes docker image creation in the release workflow.
-
-
-# Synapse 1.143.0rc1 (2025-11-18)
-
-## Features
-
-- Support multiple config files in `register_new_matrix_user`. ([\#18784](https://github.com/element-hq/synapse/issues/18784))
-- Remove authentication from `POST /_matrix/client/v1/delayed_events`, and allow calling this endpoint with the update action to take (`send`/`cancel`/`restart`) in the request path instead of the body. ([\#19152](https://github.com/element-hq/synapse/issues/19152))
-
-## Bugfixes
-
-- Fixed a longstanding bug where background updates were only run on the `main` database. ([\#19181](https://github.com/element-hq/synapse/issues/19181))
-- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))
-- Fix the SQLite-to-PostgreSQL migration script to correctly migrate a boolean column in the `delayed_events` table. ([\#19155](https://github.com/element-hq/synapse/issues/19155))
-
-## Improved Documentation
-
-- Improve documentation around streams, particularly ID generators and adding new streams. ([\#18943](https://github.com/element-hq/synapse/issues/18943))
-
-## Deprecations and Removals
-
-- Remove support for PostgreSQL 13. ([\#19170](https://github.com/element-hq/synapse/issues/19170))
-
-## Internal Changes
-
-- Provide additional servers with federation room directory results. ([\#18970](https://github.com/element-hq/synapse/issues/18970))
-- Add a shortcut return when there are no events to purge. ([\#19093](https://github.com/element-hq/synapse/issues/19093))
-- Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10. ([\#19111](https://github.com/element-hq/synapse/issues/19111))
-- Reduce cardinality of `synapse_storage_events_persisted_events_sep_total` metric by removing `origin_entity` label. This also separates out events sent by local application services by changing the `origin_type` for such events to `application_service`. The `type` field also only tracks common event types, and anything else is bucketed under `*other*`. ([\#19133](https://github.com/element-hq/synapse/issues/19133), [\#19168](https://github.com/element-hq/synapse/issues/19168))
-- Run trial tests on Python 3.14 for PRs. ([\#19135](https://github.com/element-hq/synapse/issues/19135))
-- Update `pyproject.toml` project metadata to be compatible with standard Python packaging tooling. ([\#19137](https://github.com/element-hq/synapse/issues/19137))
-- Minor speed up of processing of inbound replication. ([\#19138](https://github.com/element-hq/synapse/issues/19138), [\#19145](https://github.com/element-hq/synapse/issues/19145), [\#19146](https://github.com/element-hq/synapse/issues/19146))
-- Ignore recent Python language refactors from git blame (`.git-blame-ignore-revs`). ([\#19150](https://github.com/element-hq/synapse/issues/19150))
-- Bump lower bounds of dependencies `parameterized` to `0.9.0` and `idna` to `3.3` as those are the first to advertise support for Python 3.10. ([\#19167](https://github.com/element-hq/synapse/issues/19167))
-- Point out which event caused the exception when checking [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) redactions. ([\#19169](https://github.com/element-hq/synapse/issues/19169))
-- Restore printing `sentinel` for the log record `request` when no logcontext is active. ([\#19172](https://github.com/element-hq/synapse/issues/19172))
-- Add debug logs to track `Clock` utilities. ([\#19173](https://github.com/element-hq/synapse/issues/19173))
-- Remove explicit python version skips in `cibuildwheel` config as it's no longer required after [#19137](https://github.com/element-hq/synapse/pull/19137). ([\#19177](https://github.com/element-hq/synapse/issues/19177))
-- Fix potential lost logcontext when `PerDestinationQueue.shutdown(...)` is called. ([\#19178](https://github.com/element-hq/synapse/issues/19178))
-- Fix bad deferred logcontext handling across the codebase. ([\#19180](https://github.com/element-hq/synapse/issues/19180))
-
-
-### Updates to locked dependencies
-
-* Bump bytes from 1.10.1 to 1.11.0. ([\#19193](https://github.com/element-hq/synapse/issues/19193))
-* Bump click from 8.1.8 to 8.3.1. ([\#19195](https://github.com/element-hq/synapse/issues/19195))
-* Bump cryptography from 43.0.3 to 45.0.7. ([\#19159](https://github.com/element-hq/synapse/issues/19159))
-* Bump docker/metadata-action from 5.8.0 to 5.9.0. ([\#19161](https://github.com/element-hq/synapse/issues/19161))
-* Bump pydantic from 2.12.3 to 2.12.4. ([\#19158](https://github.com/element-hq/synapse/issues/19158))
-* Bump pyo3-log from 0.13.1 to 0.13.2. ([\#19156](https://github.com/element-hq/synapse/issues/19156))
-* Bump ruff from 0.14.3 to 0.14.5. ([\#19196](https://github.com/element-hq/synapse/issues/19196))
-* Bump sentry-sdk from 2.34.1 to 2.43.0. ([\#19157](https://github.com/element-hq/synapse/issues/19157))
-* Bump sentry-sdk from 2.43.0 to 2.44.0. ([\#19197](https://github.com/element-hq/synapse/issues/19197))
-* Bump tomli from 2.2.1 to 2.3.0. ([\#19194](https://github.com/element-hq/synapse/issues/19194))
-* Bump types-netaddr from 1.3.0.20240530 to 1.3.0.20251108. ([\#19160](https://github.com/element-hq/synapse/issues/19160))
-
-
-# Synapse 1.142.1 (2025-11-18)
-
-## Bugfixes
-
-- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))
-
-
-# Synapse 1.142.0 (2025-11-11)
+# Synapse 1.142.0rc4 (2025-11-07)
 
 ## Dropped support for Python 3.9
 
@@ -123,15 +29,6 @@ of these wheels downstream, please reach out to us in
 [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd
 love to hear from you!
 
-## Internal Changes
-
-- Properly stop building wheels for Python 3.9 and free-threaded CPython. ([\#19154](https://github.com/element-hq/synapse/issues/19154))
-
-
-# Synapse 1.142.0rc4 (2025-11-07)
-
 ## Bugfixes
 
 - Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. ([\#19144](https://github.com/element-hq/synapse/issues/19144))
```
Generated
+7 −6

```diff
@@ -73,9 +73,9 @@ checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
 
 [[package]]
 name = "bytes"
-version = "1.11.0"
+version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
+checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
 
 [[package]]
 name = "cc"
@@ -374,11 +374,12 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
 name = "http"
-version = "1.4.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a"
+checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
 dependencies = [
  "bytes",
+ "fnv",
  "itoa",
 ]
 
@@ -850,9 +851,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-log"
-version = "0.13.2"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f8bae9ad5ba08b0b0ed2bb9c2bdbaeccc69cafca96d78cf0fbcea0d45d122bb"
+checksum = "d359e20231345f21a3b5b6aea7e73f4dc97e1712ef3bfe2d88997ac6a308d784"
 dependencies = [
  "arc-swap",
  "log",
```
```diff
@@ -1 +0,0 @@
-Improve event filtering for Simplified Sliding Sync.
@@ -1 +0,0 @@
-Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times.
@@ -0,0 +1 @@
+Add experimental support for MSC4360: Sliding Sync Threads Extension.
@@ -0,0 +1 @@
+Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10.
@@ -0,0 +1 @@
+Minor speed up of processing of inbound replication.
@@ -0,0 +1 @@
+Minor speed up of processing of inbound replication.
@@ -0,0 +1 @@
+Minor speed up of processing of inbound replication.
@@ -1 +0,0 @@
-Add experimental implementation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking).
@@ -1 +0,0 @@
-Allow restarting delayed event timeouts on workers.
@@ -1 +0,0 @@
-Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`.
@@ -1 +0,0 @@
-Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable).
@@ -1 +0,0 @@
-Expire sliding sync connections that are too old or have too much pending data.
@@ -1 +0,0 @@
-Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available.
@@ -1 +0,0 @@
-Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch.
@@ -1 +0,0 @@
-Auto-fix trailing spaces in multi-line strings and comments when running the lint script.
@@ -1 +0,0 @@
-Move towards using a dedicated `Duration` type.
@@ -1 +0,0 @@
-Improve robustness of the SQL schema linting in CI.
@@ -1 +0,0 @@
-Stop building release wheels for MacOS.
@@ -1 +0,0 @@
-Move towards using a dedicated `Duration` type.
@@ -1 +0,0 @@
-Add a unit test for ensuring associated refresh tokens are erased when a device is deleted.
@@ -1 +0,0 @@
-Prompt user to consider adding future deprecations to the changelog in release script.
@@ -1 +0,0 @@
-Fix bug where invalid `canonical_alias` content would return 500 instead of 400.
@@ -1 +0,0 @@
-Document in the `--config-path` help how multiple files are merged - by merging them shallowly.
@@ -1 +0,0 @@
-Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`.
```
```diff
@@ -2166,10 +2166,10 @@
           "datasource": {
             "uid": "${DS_PROMETHEUS}"
           },
-          "expr": "rate(synapse_storage_events_persisted_events_sep_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+          "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "{{origin_type}}",
+          "legendFormat": "{{type}}",
           "refId": "D"
         }
       ],
@@ -2254,7 +2254,7 @@
           "datasource": {
             "uid": "${DS_PROMETHEUS}"
           },
-          "expr": "sum by(type) (rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+          "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 2,
@@ -2294,6 +2294,99 @@
         "align": false
       }
     },
+    {
+      "aliasColors": {
+        "irc-freenode (local)": "#EAB839"
+      },
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": {
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "decimals": 1,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 0,
+        "y": 44
+      },
+      "hiddenSeries": false,
+      "id": 44,
+      "legend": {
+        "alignAsTable": true,
+        "avg": false,
+        "current": false,
+        "hideEmpty": true,
+        "hideZero": true,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "alertThreshold": true
+      },
+      "percentage": false,
+      "pluginVersion": "9.2.2",
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "datasource": {
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "{{origin_entity}} ({{origin_type}})",
+          "refId": "A",
+          "step": 20
+        }
+      ],
+      "thresholds": [],
+      "timeRegions": [],
+      "title": "Events/s by Origin",
+      "tooltip": {
+        "shared": false,
+        "sort": 2,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "mode": "time",
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "hertz",
+          "logBase": 1,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "logBase": 1,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false
+      }
+    },
     {
       "aliasColors": {},
       "bars": false,
```
```diff
@@ -44,3 +44,31 @@ groups:
   ###
   ### End of 'Prometheus Console Only' rules block
   ###
 
+
+  ###
+  ### Grafana Only
+  ### The following rules are only needed if you use the Grafana dashboard
+  ### in contrib/grafana/synapse.json
+  ###
+  - record: synapse_storage_events_persisted_by_source_type
+    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
+    labels:
+      type: remote
+  - record: synapse_storage_events_persisted_by_source_type
+    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
+    labels:
+      type: local
+  - record: synapse_storage_events_persisted_by_source_type
+    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
+    labels:
+      type: bridges
+
+  - record: synapse_storage_events_persisted_by_event_type
+    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
+
+  - record: synapse_storage_events_persisted_by_origin
+    expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
+  ###
+  ### End of 'Grafana Only' rules block
+  ###
```
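The recording rules above aggregate the raw counter with PromQL's `sum without(...)`, which drops the listed labels and sums over each remaining unique label set. A pure-Python illustration of that semantics, over hypothetical sample label sets (not Synapse code):

```python
from collections import Counter

# Hypothetical time-series samples: (labels, value).
samples = [
    ({"type": "m.room.message", "origin_type": "local", "origin_entity": "*client*"}, 12.0),
    ({"type": "m.room.member", "origin_type": "local", "origin_entity": "@bridge:hs"}, 3.0),
    ({"type": "m.room.message", "origin_type": "remote", "origin_entity": "other.hs"}, 7.0),
]

def sum_without(samples, dropped: set[str]) -> Counter:
    """Mimic PromQL `sum without(<dropped>) (metric)`: remove the listed
    labels, then sum values over each remaining unique label set."""
    totals: Counter = Counter()
    for labels, value in samples:
        key = tuple(sorted((k, v) for k, v in labels.items() if k not in dropped))
        totals[key] += value
    return totals

# Equivalent in spirit to the `_by_origin` rule: sum without(type) (...)
print(sum_without(samples, {"type"}))
```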
Vendored
-30

```diff
@@ -1,33 +1,3 @@
-matrix-synapse-py3 (1.143.0) stable; urgency=medium
-
-  * New Synapse release 1.143.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Nov 2025 08:44:56 -0700
-
-matrix-synapse-py3 (1.143.0~rc2) stable; urgency=medium
-
-  * New Synapse release 1.143.0rc2.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 17:36:08 -0700
-
-matrix-synapse-py3 (1.143.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.143.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 13:08:39 -0700
-
-matrix-synapse-py3 (1.142.1) stable; urgency=medium
-
-  * New Synapse release 1.142.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 12:25:23 -0700
-
-matrix-synapse-py3 (1.142.0) stable; urgency=medium
-
-  * New Synapse release 1.142.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Nov 2025 09:45:51 +0000
-
 matrix-synapse-py3 (1.142.0~rc4) stable; urgency=medium
 
   * New Synapse release 1.142.0rc4.
```
```diff
@@ -11,7 +11,7 @@ ARG SYNAPSE_VERSION=latest
 ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
 ARG DEBIAN_VERSION=trixie
 
-FROM docker.io/library/postgres:14-${DEBIAN_VERSION} AS postgres_base
+FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
 
 FROM $FROM
 # First of all, we copy postgres server from the official postgres image,
@@ -26,7 +26,7 @@ RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
 COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
 COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
 COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
-ENV PATH="${PATH}:/usr/lib/postgresql/14/bin"
+ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
 ENV PGDATA=/var/lib/postgresql/data
 
 # We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
```
```diff
@@ -196,7 +196,6 @@ WORKERS_CONFIG: dict[str, dict[str, Any]] = {
             "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$",
-            "^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$",
         ],
         "shared_extra_conf": {},
         "worker_extra_conf": "",
```
@@ -1,4 +1,4 @@
# Streams
## Streams

Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
https://github.com/element-hq/synapse/blob/develop/synapse/storage/util/id_generators.py
@@ -19,7 +19,7 @@ To that end, let's describe streams formally, paraphrasing from the docstring of
https://github.com/element-hq/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).

## Definition
### Definition

A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.

@@ -47,7 +47,7 @@ But unhappy cases (e.g. transaction rollback due to an error) also count as comp
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.
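
To make this lifecycle concrete, here is a toy sketch of a single-writer stream: allocate an ID, persist the fact, then mark it completed. This is illustrative only — Synapse's real ID generators live in `id_generators.py` and are considerably more involved — and `ToyStreamWriter` is a hypothetical name.

```python
class ToyStreamWriter:
    """Toy model of a single-writer stream's ID allocation and completion."""

    def __init__(self) -> None:
        self._next_id = 1                   # next stream ID to hand out
        self._in_flight: set[int] = set()   # allocated but not yet completed
        self._current = 0                   # highest ID with nothing earlier in flight

    def allocate(self) -> int:
        """Reserve the next stream ID for a fact we are about to persist."""
        stream_id = self._next_id
        self._next_id += 1
        self._in_flight.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        """Mark a fact as completed (committed, or rolled back on error)."""
        self._in_flight.discard(stream_id)
        # Advance the current position over any contiguous completed IDs.
        while self._current + 1 < self._next_id and (self._current + 1) not in self._in_flight:
            self._current += 1

    @property
    def current(self) -> int:
        """IDs at or below this value are fixed; no new rows will appear there."""
        return self._current
```

Note how completing ID 2 before ID 1 leaves `current` unchanged until ID 1 also completes — the "buffer/scratchpad" behaviour described later in this document.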

## Current stream ID
### Current stream ID

For any given stream reader (including writers themselves), we may define a per-writer current stream ID:

@@ -93,7 +93,7 @@ Consider a single-writer stream which is initially at ID 1.
| Complete 6 | 6 | |


## Multi-writer streams
### Multi-writer streams

There are two ways to view a multi-writer stream.

@@ -115,7 +115,7 @@ The facts this stream holds are instructions to "you should now invalidate these
We only ever treat this as multiple single-writer streams as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
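
Since readers track the current position of every writer, a reader's view of a multi-writer stream can be sketched as one position per writer. This is a hedged illustration — Synapse's real token types live in `synapse/types` — with made-up names:

```python
from dataclasses import dataclass, field


@dataclass
class ToyMultiWriterPosition:
    """A reader's position on a multi-writer stream: one stream ID per writer."""

    positions: dict[str, int] = field(default_factory=dict)

    def advance(self, writer: str, stream_id: int) -> None:
        # Positions only ever move forwards.
        self.positions[writer] = max(stream_id, self.positions.get(writer, 0))

    def has_seen(self, writer: str, stream_id: int) -> bool:
        """Have we observed everything this writer persisted up to stream_id?"""
        return stream_id <= self.positions.get(writer, 0)
```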

## Writing to streams
### Writing to streams

Writers need to track:
- their current position (i.e. their own per-writer stream ID).

@@ -133,7 +133,7 @@ To complete a fact, first remove it from your map of facts currently awaiting co
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
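
A sketch of that completion step, assuming a `pending` set of allocated-but-incomplete IDs and a stand-in `emit_rdata` callback (not a real Synapse API):

```python
def complete_fact(
    stream_id: int,
    pending: set[int],        # stream IDs allocated but not yet completed
    current_position: int,    # highest ID known to be fully persisted
    highest_allocated: int,   # highest ID handed out so far
    emit_rdata,               # callback: emit_rdata(stream_id), a placeholder
) -> int:
    """Mark `stream_id` complete; return the writer's new current position."""
    # First, remove the fact from the set awaiting completion.
    pending.discard(stream_id)
    # Then advance over any contiguous run of completed IDs, emitting one
    # RDATA per fact between the old and the new position.
    while current_position < highest_allocated and (current_position + 1) not in pending:
        current_position += 1
        emit_rdata(current_position)
    return current_position
```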

## Subscribing to streams
### Subscribing to streams

Readers need to track the current position of every writer.

@@ -146,44 +146,10 @@ The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
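
Putting the reader side together, roughly (the function names here are illustrative placeholders, not Synapse's actual handler API):

```python
def fetch_stream_rows(stream_name: str, up_to_id: int) -> list:
    """Placeholder: would SELECT the new rows from the stream's table."""
    return []


def process_rows(rows: list) -> None:
    """Placeholder: act on the facts (e.g. invalidate caches, wake up sync)."""


def on_rdata(
    stream_name: str, writer: str, stream_id: int, positions: dict[str, int]
) -> None:
    # The RDATA is not self-contained: go back to the database for the details.
    rows = fetch_stream_rows(stream_name, stream_id)
    process_rows(rows)
    # Advance our record of this writer's current position for the stream.
    positions[writer] = max(stream_id, positions.get(writer, 0))
```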

## Summary
# Summary

In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.

---

## Cheatsheet for creating a new stream

These rough notes and links may help you to create a new stream and add all the
necessary registration and event handling.

**Create your stream:**
- [create a stream class and stream row class](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/streams/_base.py#L728)
- will need an [ID generator](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L75)
- may need [writer configuration](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/config/workers.py#L177), if there isn't already an obvious source of configuration for which workers should be designated as writers to your new stream.
- if adding new writer configuration, add Docker-worker configuration, which lets us configure the writer worker in Complement tests: [[1]](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/docker/configure_workers_and_start.py#L331), [[2]](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/docker/configure_workers_and_start.py#L440)
- most of the time, you will likely introduce a new datastore class for the concept represented by the new stream, unless there is already an obvious datastore that covers it.
- consider whether it may make sense to introduce a handler

**Register your stream in:**
- [`STREAMS_MAP`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/streams/__init__.py#L71)

**Advance your stream in:**
- [`process_replication_position` of your appropriate datastore](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L111)
  - don't forget the super call

**If you're going to do any caching that needs invalidation from new rows:**
- add invalidations to [`process_replication_rows` of your appropriate datastore](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L91)
  - don't forget the super call
- add local-only [invalidations to your writer transactions](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L201)

**For streams to be used in sync:**
- add a new field to [`StreamToken`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/types/__init__.py#L1003)
- add a new [`StreamKeyType`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/types/__init__.py#L999)
- add appropriate wake-up rules
  - in [`on_rdata`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/client.py#L260)
  - locally on the same worker when completing a write, [e.g. in your handler](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/handlers/thread_subscriptions.py#L139)
- add the stream in [`bound_future_token`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/streams/events.py#L127)

---
@@ -86,45 +86,6 @@ server {
}
```

### Nginx Proxy Manager or NPMPlus

```nginx
Add New Proxy-Host
- Tab Details
  - Domain Names: matrix.example.com
  - Scheme: http
  - Forward Hostname / IP: localhost # IP address or hostname where Synapse is hosted. Bare-metal or container.
  - Forward Port: 8008

- Tab Custom locations
  - Add Location
  - Define Location: /_matrix
  - Scheme: http
  - Forward Hostname / IP: localhost # IP address or hostname where Synapse is hosted. Bare-metal or container.
  - Forward Port: 8008
  - Click on the gear icon to display a custom configuration field. Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
  - Enter this in the Custom Field: client_max_body_size 50M;

- Tab SSL/TLS
  - Choose your SSL/TLS certificate and preferred settings.

- Tab Advanced
  - Enter this in the Custom Field. This means that port 8448 no longer needs to be opened in your firewall;
    federation traffic now uses port 443.

    location /.well-known/matrix/server {
        return 200 '{"m.server": "matrix.example.com:443"}';
        add_header Content-Type application/json;
    }

    location /.well-known/matrix/client {
        return 200 '{"m.homeserver": {"base_url": "https://matrix.example.com"}}';
        add_header Content-Type application/json;
        add_header "Access-Control-Allow-Origin" *;
    }

```

### Caddy v2

```
@@ -117,33 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.144.0

## Worker support for unstable MSC4140 `/restart` endpoint

The following unstable endpoint pattern may now be routed to worker processes:

```
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events/.*/restart$
```

## Unstable mutual rooms endpoint is now behind an experimental feature flag

The unstable mutual rooms endpoint from
[MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
(`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) is now
disabled by default. If you rely on this unstable endpoint, you must now set
`experimental_features.msc2666_enabled: true` in your configuration to keep
using it.

# Upgrading to v1.143.0

## Dropping support for PostgreSQL 13

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.

# Upgrading to v1.142.0

## Python 3.10+ is now required
@@ -285,13 +285,10 @@ information.
    # User directory search requests
    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

    # Unstable MSC4140 support
    ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$

Additionally, the following REST endpoints can be handled for GET requests:

    # Push rules requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
    ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events

    # Account data requests
    ^/_matrix/client/(r0|v3|unstable)/.*/tags
@@ -1,183 +1,3 @@
[project]
name = "matrix-synapse"
version = "1.143.0"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
    { name = "Matrix.org Team and Contributors", email = "packages@matrix.org" }
]
requires-python = ">=3.10.0,<4.0.0"
license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial"
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Topic :: Communications :: Chat",
]

# Mandatory Dependencies
dependencies = [
    # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
    "jsonschema>=3.0.0",
    # 0.25.0 is the first version to support Python 3.14.
    # We can remove this once https://github.com/python-jsonschema/jsonschema/issues/1426 is fixed
    # and included in a release.
    "rpds-py>=0.25.0",
    # We choose 2.0 as a lower bound: the most recent backwards incompatible release.
    # It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
    "immutabledict>=2.0",
    # We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
    "unpaddedbase64>=2.1.0",
    # We require 2.0.0 for immutabledict support.
    "canonicaljson>=2.0.0,<3.0.0",
    # we use the type definitions added in signedjson 1.1.
    "signedjson>=1.1.0,<2.0.0",
    # validating SSL certs for IP addresses requires service_identity 18.1.
    "service-identity>=18.1.0",
    # Twisted 18.9 introduces some logger improvements that the structured
    # logger utilises
    # Twisted 19.7.0 moves test helpers to a new module and deprecates the old location.
    # Twisted 21.2.0 introduces contextvar support.
    # We could likely bump this to 22.1 without making distro packagers'
    # lives hard (as of 2025-07, distro support is Ubuntu LTS: 22.1, Debian stable: 22.4,
    # RHEL 9: 22.10)
    "Twisted[tls]>=21.2.0",
    "treq>=21.5.0",
    # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
    "pyOpenSSL>=16.0.0",
    "PyYAML>=5.3",
    "pyasn1>=0.1.9",
    "pyasn1-modules>=0.0.7",
    "bcrypt>=3.1.7",
    # 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
    # Packagers that already took care of libwebp can lower that down to 5.4.0.
    "Pillow>=10.0.1",
    # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
    # 2.0.5 updates collections.abc imports to avoid Python 3.10 incompatibility.
    "sortedcontainers>=2.0.5",
    "pymacaroons>=0.13.0",
    "msgpack>=0.5.2",
    "phonenumbers>=8.2.0",
    # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
    # `prometheus_client.metrics` was added in 0.5.0, so we require that too.
    # We chose 0.6.0 as that is the current version in Debian Buster (oldstable).
    "prometheus-client>=0.6.0",
    # we use `order`, which arrived in attrs 19.2.0.
    # Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
    "attrs>=19.2.0,!=21.1.0",
    "netaddr>=0.7.18",
    # Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
    # end up with a broken installation, with recent MarkupSafe but old Jinja, we
    # add a lower bound to the Jinja2 dependency.
    "Jinja2>=3.0",
    # 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
    "bleach>=3.2.0",
    # pydantic 2.12 depends on typing-extensions>=4.14.1
    "typing-extensions>=4.14.1",
    # We enforce that we have a `cryptography` version that bundles an `openssl`
    # with the latest security patches.
    "cryptography>=3.4.7",
    # ijson 3.1.4 fixes a bug with "." in property names
    "ijson>=3.1.4",
    "matrix-common>=1.3.0,<2.0.0",
    # We need packaging.version.Version(...).major added in 20.0.
    "packaging>=20.0",
    "pydantic>=2.8;python_version < '3.14'",
    "pydantic>=2.12;python_version >= '3.14'",

    # This is for building the rust components during "poetry install", which
    # currently ignores the `build-system.requires` directive (c.f.
    # https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
    # `poetry build` do the right thing without this explicit dependency.
    #
    # This isn't really a dev-dependency, as `poetry install --without dev` will fail,
    # but the alternative is to add it to the main list of deps where it isn't
    # needed.
    "setuptools_rust>=1.3",

    # This is used for parsing multipart responses
    "python-multipart>=0.0.9",
]

[project.optional-dependencies]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3>=0.1"]
postgres = [
    "psycopg2>=2.8;platform_python_implementation != 'PyPy'",
    "psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
    "psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
]
saml2 = ["pysaml2>=4.5.0"]
oidc = ["authlib>=0.15.1"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
opentracing = ["jaeger-client>=4.2.0", "opentracing>=2.2.0"]
jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi>=1.4.7", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
# If this is updated, don't forget to update the equivalent lines in
# tool.poetry.group.dev.dependencies.
test = ["parameterized>=0.9.0", "idna>=3.3"]

# The duplication here is awful.
#
# TODO: This can be resolved via PEP 735 dependency groups, which poetry supports
# since 2.2.0. However, switching to that would require updating the command
# developers use to install the `all` group. This would require some coordination.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
    # matrix-synapse-ldap3
    "matrix-synapse-ldap3>=0.1",
    # postgres
    "psycopg2>=2.8;platform_python_implementation != 'PyPy'",
    "psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
    "psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
    # saml2
    "pysaml2>=4.5.0",
    # oidc and jwt
    "authlib>=0.15.1",
    # url-preview
    "lxml>=4.6.3",
    # sentry
    "sentry-sdk>=0.7.2",
    # opentracing
    "jaeger-client>=4.2.0", "opentracing>=2.2.0",
    # redis
    "txredisapi>=1.4.7", "hiredis",
    # cache-memory
    "pympler",
    # omitted:
    # - test: it's useful to have this separate from dev deps in the olddeps job
    # - systemd: this is a system-based requirement
]

[project.urls]
repository = "https://github.com/element-hq/synapse"
documentation = "https://element-hq.github.io/synapse/latest"
"Issue Tracker" = "https://github.com/element-hq/synapse/issues"

[project.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"

export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"


[tool.towncrier]
package = "synapse"
filename = "CHANGES.md"
@@ -269,8 +89,6 @@ extend-safe-fixes = [
    "UP007",
    # pyupgrade rules compatible with Python >= 3.10
    "UP045",
    # Allow ruff to automatically fix trailing spaces within a multi-line string/comment.
    "W293"
]

[tool.ruff.lint.isort]
@@ -293,9 +111,20 @@ manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.142.0rc4"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial"
readme = "README.rst"
repository = "https://github.com/element-hq/synapse"
packages = [
    { include = "synapse" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Topic :: Communications :: Chat",
]
include = [
    { path = "AUTHORS.rst", format = "sdist" },
    { path = "book.toml", format = "sdist" },
@@ -325,12 +154,197 @@ exclude = [
script = "build_rust.py"
generate-setup-file = true

[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"

export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.10.0"

# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# 0.25.0 is the first version to support Python 3.14.
# We can remove this once https://github.com/python-jsonschema/jsonschema/issues/1426 is fixed
# and included in a release.
rpds-py = ">=0.25.0"
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
immutabledict = ">=2.0"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 2.0.0 for immutabledict support.
canonicaljson = "^2.0.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
# Twisted 19.7.0 moves test helpers to a new module and deprecates the old location.
# Twisted 21.2.0 introduces contextvar support.
# We could likely bump this to 22.1 without making distro packagers'
# lives hard (as of 2025-07, distro support is Ubuntu LTS: 22.1, Debian stable: 22.4,
# RHEL 9: 22.10)
Twisted = {extras = ["tls"], version = ">=21.2.0"}
treq = ">=21.5.0"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=5.3"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
# Packagers that already took care of libwebp can lower that down to 5.4.0.
Pillow = ">=10.0.1"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
# 2.0.5 updates collections.abc imports to avoid Python 3.10 incompatibility.
sortedcontainers = ">=2.0.5"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
# `prometheus_client.metrics` was added in 0.5.0, so we require that too.
# We chose 0.6.0 as that is the current version in Debian Buster (oldstable).
prometheus-client = ">=0.6.0"
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
attrs = ">=19.2.0,!=21.1.0"
netaddr = ">=0.7.18"
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
# 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
bleach = ">=3.2.0"
# pydantic 2.12 depends on typing-extensions>=4.14.1
typing-extensions = ">=4.14.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.version.Version(...).major added in 20.0.
packaging = ">=20.0"
pydantic = [
    { version = "~=2.8", python = "<3.14" },
    { version = "~=2.12", python = ">=3.14" },
]

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --without dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
setuptools_rust = ">=1.3"

# This is used for parsing multipart responses
python-multipart = ">=0.0.9"

# Optional Dependencies
# ---------------------
matrix-synapse-ldap3 = { version = ">=0.1", optional = true }
psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true }
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
# 4.6.3 removes usage of _PyGen_Send which is unavailable in CPython as of Python 3.10.
lxml = { version = ">=4.6.3", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
# 4.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
jaeger-client = { version = ">=4.2.0", optional = true }
txredisapi = { version = ">=1.4.7", optional = true }
hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }

[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
# twice: once here, and once in the `all` extra.
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
saml2 = ["pysaml2"]
oidc = ["authlib"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
test = ["parameterized", "idna"]

# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
# 1) for new installations, I want instructions in existing documentation and tutorials
#    out there to still work.
# 2) I don't want to hard-code a list of extras into CI if I can help it. The ideal
#    solution here would be something like https://github.com/python-poetry/poetry/issues/3413
#    Poetry 1.2's dependency groups might make this easier. But I'm not trying that out
#    until there's a stable release of 1.2.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
    # matrix-synapse-ldap3
    "matrix-synapse-ldap3",
    # postgres
    "psycopg2", "psycopg2cffi", "psycopg2cffi-compat",
    # saml2
    "pysaml2",
    # oidc and jwt
    "authlib",
    # url-preview
    "lxml",
    # sentry
    "sentry-sdk",
    # opentracing
    "jaeger-client", "opentracing",
    # redis
    "txredisapi", "hiredis",
    # cache-memory
    "pympler",
    # omitted:
    # - test: it's useful to have this separate from dev deps in the olddeps job
    # - systemd: this is a system-based requirement
]

[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.14.5"
ruff = "0.14.3"

# Typechecking
lxml-stubs = ">=0.4.0"
@@ -350,11 +364,10 @@ types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# If this is updated, don't forget to update the equivalent lines in
# project.optional-dependencies.test.
parameterized = ">=0.9.0"
idna = ">=3.3"
# parameterized<0.7.4 can create classes with names that would normally be invalid
# identifiers. trial really does not like this when running with multiple workers.
parameterized = ">=0.7.4"
idna = ">=2.5"

# The following are used by the release script
click = ">=8.1.3"
@@ -370,9 +383,6 @@ towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"

# Used for checking the schema delta files
sqlglot = ">=28.0.0"


[build-system]
# The upper bounds here are defensive, intended to prevent situations like
@@ -381,28 +391,19 @@ sqlglot = ">=28.0.0"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=2.0.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
requires = ["poetry-core>=1.1.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
build-backend = "poetry.core.masonry.api"


[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
#
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the
# list of supported build targets.
#
# Also see `.github/workflows/release-artifacts.yml` for the list of
# architectures we build for (based on the runner OS types we use), as well as
# the platforms we exclude from testing in CI.
#
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - free-threaded cpython builds: these are not currently supported.
# - i686: We don't support 32-bit platforms.
# - *macosx*: we don't support building wheels for MacOS.
skip = "cp3??t-* *i686* *macosx*"
# Enable non-default builds. See the list of available options:
# https://cibuildwheel.pypa.io/en/stable/options#enable
#
# - CPython 3.8: EOLed
# - musllinux i686: excluded to reduce number of wheels we build.
#   c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
skip = "cp38* *-musllinux_i686"
# Enable non-default builds.
# "pypy" used to be included by default up until cibuildwheel 3.
enable = "pypy"

@@ -1,56 +0,0 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2025 Element Creations, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

use once_cell::sync::OnceCell;
use pyo3::{
    types::{IntoPyDict, PyAnyMethods},
    Bound, BoundObject, IntoPyObject, Py, PyAny, PyErr, PyResult, Python,
};

/// A reference to the `synapse.util.duration` module.
static DURATION: OnceCell<Py<PyAny>> = OnceCell::new();

/// Access to the `synapse.util.duration` module.
fn duration_module(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> {
    Ok(DURATION
        .get_or_try_init(|| py.import("synapse.util.duration").map(Into::into))?
        .bind(py))
}

/// Mirrors the `synapse.util.duration.Duration` Python class.
pub struct SynapseDuration {
    microseconds: u64,
}

impl SynapseDuration {
    /// For now we only need to create durations from milliseconds.
    pub fn from_milliseconds(milliseconds: u64) -> Self {
        Self {
            microseconds: milliseconds * 1_000,
        }
    }
}

impl<'py> IntoPyObject<'py> for &SynapseDuration {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;

    fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
        let duration_module = duration_module(py)?;
        let kwargs = [("microseconds", self.microseconds)].into_py_dict(py)?;
        let duration_instance = duration_module.call_method("Duration", (), Some(&kwargs))?;
        Ok(duration_instance.into_bound())
    }
}
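
For context, the shim above constructs instances of a Python class along these lines. This is a hypothetical sketch of `synapse.util.duration.Duration`, inferred only from the `microseconds` keyword argument used in `into_pyobject`; the real class may differ.

```python
class Duration:
    """Hypothetical sketch of the Python counterpart the Rust shim calls into."""

    def __init__(self, microseconds: int = 0) -> None:
        self.microseconds = microseconds

    @property
    def milliseconds(self) -> int:
        # Derived view, matching the millisecond-based constructor on the Rust side.
        return self.microseconds // 1_000
```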

@@ -5,7 +5,6 @@ use pyo3::prelude::*;
use pyo3_log::ResetHandle;

pub mod acl;
pub mod duration;
pub mod errors;
pub mod events;
pub mod http;
@@ -35,7 +35,6 @@ use ulid::Ulid;

use self::session::Session;
use crate::{
    duration::SynapseDuration,
    errors::{NotFoundError, SynapseError},
    http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
    UnwrapInfallible,
@@ -133,8 +132,6 @@ impl RendezvousHandler {
        .unwrap_infallible()
        .unbind();

        let eviction_duration = SynapseDuration::from_milliseconds(eviction_interval);

        // Construct a Python object so that we can get a reference to the
        // evict method and schedule it to run.
        let self_ = Py::new(
@@ -152,7 +149,7 @@ impl RendezvousHandler {
        let evict = self_.getattr(py, "_evict")?;
        homeserver.call_method0("get_clock")?.call_method(
            "looping_call",
            (evict, &eviction_duration),
            (evict, eviction_interval),
            None,
        )?;
@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.142/synapse-config.schema.json
type: object
properties:
  modules:
@@ -9,11 +9,15 @@ from typing import Any

import click
import git
import sqlglot
import sqlglot.expressions

SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")

INDEX_CREATION_REGEX = re.compile(
    r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE
)
INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE)
TABLE_CREATION_REGEX = re.compile(
    r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE
)

# The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against.
@@ -137,9 +141,6 @@ def main(force_colors: bool) -> None:
            color=force_colors,
        )

        # Mark this run as not successful, but continue so that we report *all*
        # errors.
        return_code = 1
    else:
        click.secho(
            f"All deltas are in the correct folder: {current_schema_version}!",
@@ -152,90 +153,60 @@ def main(force_colors: bool) -> None:
    # and delta files are also numbered in order.
    changed_delta_files.sort()

    success = check_schema_delta(changed_delta_files, force_colors)
    if not success:
        return_code = 1
    # Now check that we're not trying to create or drop indices. If we want to
    # do that they should be in background updates. The exception is when we
    # create indices on tables we've just created.
    created_tables = set()
    for delta_file in changed_delta_files:
        with open(delta_file) as fd:
            delta_lines = fd.readlines()

        for line in delta_lines:
            # Strip SQL comments
            line = line.split("--", maxsplit=1)[0]

            # Check and track any tables we create
            match = TABLE_CREATION_REGEX.search(line)
            if match:
                table_name = match.group(1)
                created_tables.add(table_name)

            # Check for dropping indices, these are always banned
            match = INDEX_DELETION_REGEX.search(line)
            if match:
                clause = match.group()

                click.secho(
                    f"Found delta with index deletion: '{clause}' in {delta_file}",
                    fg="red",
                    bold=True,
                    color=force_colors,
                )
                click.secho(
                    "  ↪ These should be in background updates.",
                )
                return_code = 1

            # Check for index creation, which is only allowed for tables we've
            # created.
            match = INDEX_CREATION_REGEX.search(line)
            if match:
                clause = match.group()
                table_name = match.group(1)
                if table_name not in created_tables:
                    click.secho(
                        f"Found delta with index creation for existing table: '{clause}' in {delta_file}",
                        fg="red",
                        bold=True,
                        color=force_colors,
                    )
                    click.secho(
                        "  ↪ These should be in background updates (or the table should be created in the same delta).",
                    )
                    return_code = 1

    click.get_current_context().exit(return_code)


def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool:
    """Check that the given schema delta files do not create or drop indices
    inappropriately.

    Index creation is only allowed on tables created in the same set of deltas.

    Index deletion is never allowed and should be done in background updates.

    Returns:
        True if all checks succeeded, False if at least one failed.
    """

    # The tables created in this delta
    created_tables = set[str]()

    # The indices created/dropped in this delta, each a tuple of (table_name, sql)
    created_indices = list[tuple[str, str]]()

    # The indices dropped in this delta, just the sql
    dropped_indices = list[str]()

    for delta_file in delta_files:
        with open(delta_file) as fd:
            delta_contents = fd.read()

        # Assume the SQL dialect from the file extension, defaulting to Postgres.
        sql_lang = "postgres"
        if delta_file.endswith(".sqlite"):
            sql_lang = "sqlite"

        statements = sqlglot.parse(delta_contents, read=sql_lang)

        for statement in statements:
            if isinstance(statement, sqlglot.expressions.Create):
                if statement.kind == "TABLE":
                    assert isinstance(statement.this, sqlglot.expressions.Schema)
                    assert isinstance(statement.this.this, sqlglot.expressions.Table)

                    table_name = statement.this.this.name
                    created_tables.add(table_name)
                elif statement.kind == "INDEX":
                    assert isinstance(statement.this, sqlglot.expressions.Index)

                    table_name = statement.this.args["table"].name
                    created_indices.append((table_name, statement.sql()))
            elif isinstance(statement, sqlglot.expressions.Drop):
                if statement.kind == "INDEX":
                    dropped_indices.append(statement.sql())

    success = True
    for table_name, clause in created_indices:
        if table_name not in created_tables:
            click.secho(
                f"Found delta with index creation for existing table: '{clause}'",
                fg="red",
                bold=True,
                color=force_colors,
            )
            click.secho(
                "  ↪ These should be in background updates (or the table should be created in the same delta).",
            )
            success = False

    for clause in dropped_indices:
        click.secho(
            f"Found delta with index deletion: '{clause}'",
            fg="red",
            bold=True,
            color=force_colors,
        )
        click.secho(
            "  ↪ These should be in background updates.",
        )
        success = False

    return success


if __name__ == "__main__":
    main()
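
For reference, a self-contained sketch of the `sqlglot` classification this checker relies on — the same `parse`/`Create`/`Drop` calls as above, with made-up SQL for the demo:

```python
import sqlglot
import sqlglot.expressions

# Hypothetical delta contents, purely for illustration.
sql = """
CREATE TABLE thread_subscriptions (id BIGINT);
CREATE INDEX ts_idx ON thread_subscriptions (id);
DROP INDEX old_idx;
"""

for statement in sqlglot.parse(sql, read="postgres"):
    if statement is None:
        continue  # empty statements parse to None
    if isinstance(statement, sqlglot.expressions.Create):
        # statement.kind distinguishes "TABLE" from "INDEX" creations
        print("create", statement.kind, "->", statement.sql())
    elif isinstance(statement, sqlglot.expressions.Drop):
        print("drop", statement.kind, "->", statement.sql())
```

Parsing with a real SQL grammar avoids the false positives the old regex-based check was prone to (e.g. keywords inside string literals or comments).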
@@ -72,151 +72,153 @@ For help on arguments to 'go test', run 'go help testflag'.
EOF
}

# We use a function to wrap the script logic so that we can use `return` to exit early
# if needed. This is particularly useful so that this script can be sourced by other
# scripts without exiting the calling subshell (composable). This allows us to share
# variables like `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` with other scripts.
#
# Returns an exit code of 0 on success, or 1 on failure.
main() {
    # parse our arguments
    skip_docker_build=""
    skip_complement_run=""
    while [ $# -ge 1 ]; do
    # parse our arguments
    skip_docker_build=""
    skip_complement_run=""
    while [ $# -ge 1 ]; do
        arg=$1
        case "$arg" in
            "-h")
                usage
                return 1
                ;;
            "-f"|"--fast")
                skip_docker_build=1
                ;;
            "--build-only")
                skip_complement_run=1
                ;;
            "-e"|"--editable")
                use_editable_synapse=1
                ;;
            "--rebuild-editable")
                rebuild_editable_synapse=1
                ;;
            *)
                # unknown arg: presumably an argument to gotest. break the loop.
                break
            "-h")
                usage
                exit 1
                ;;
            "-f"|"--fast")
                skip_docker_build=1
                ;;
            "--build-only")
                skip_complement_run=1
                ;;
            "-e"|"--editable")
                use_editable_synapse=1
                ;;
            "--rebuild-editable")
                rebuild_editable_synapse=1
                ;;
            *)
                # unknown arg: presumably an argument to gotest. break the loop.
                break
        esac
        shift
    done
    done

    # enable buildkit for the docker builds
    export DOCKER_BUILDKIT=1
    # enable buildkit for the docker builds
    export DOCKER_BUILDKIT=1

    # Determine whether to use the docker or podman container runtime.
    if [ -n "$PODMAN" ]; then
        export CONTAINER_RUNTIME=podman
        export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
        export BUILDAH_FORMAT=docker
        export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
    else
        export CONTAINER_RUNTIME=docker
    fi
    # Determine whether to use the docker or podman container runtime.
    if [ -n "$PODMAN" ]; then
        export CONTAINER_RUNTIME=podman
        export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
        export BUILDAH_FORMAT=docker
        export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
    else
        export CONTAINER_RUNTIME=docker
    fi

    # Change to the repository root
    cd "$(dirname $0)/.."
    # Change to the repository root
    cd "$(dirname $0)/.."

    # Check for a user-specified Complement checkout
    if [[ -z "$COMPLEMENT_DIR" ]]; then
        COMPLEMENT_REF=${COMPLEMENT_REF:-main}
        echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
        wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
        tar -xzf ${COMPLEMENT_REF}.tar.gz
        COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
        echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
    fi
    # Check for a user-specified Complement checkout
    if [[ -z "$COMPLEMENT_DIR" ]]; then
        COMPLEMENT_REF=${COMPLEMENT_REF:-main}
        echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
        wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
        tar -xzf ${COMPLEMENT_REF}.tar.gz
        COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
        echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
    fi

    if [ -n "$use_editable_synapse" ]; then
    if [ -n "$use_editable_synapse" ]; then
        if [[ -e synapse/synapse_rust.abi3.so ]]; then
            # In an editable install, back up the host's compiled Rust module to prevent
            # inconvenience; the container will overwrite the module with its own copy.
            mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
            # And restore it on exit:
            synapse_pkg=`realpath synapse`
            trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
            # In an editable install, back up the host's compiled Rust module to prevent
            # inconvenience; the container will overwrite the module with its own copy.
            mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
            # And restore it on exit:
            synapse_pkg=`realpath synapse`
            trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
        fi

        editable_mount="$(realpath .):/editable-src:z"
        if [ -n "$rebuild_editable_synapse" ]; then
            unset skip_docker_build
        elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
            # complement-synapse-editable already exists: see if we can still use it:
            # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
            # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

            # First set up the module in the right place for an editable installation.
            $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

            if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
                && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
                skip_docker_build=1
            else
                echo "Editable Synapse image is stale. Will rebuild."
                unset skip_docker_build
            fi
        fi
    fi
    elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
        # complement-synapse-editable already exists: see if we can still use it:
        # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
        # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

        if [ -z "$skip_docker_build" ]; then
            # First set up the module in the right place for an editable installation.
            $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

            if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
                && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
                skip_docker_build=1
            else
                echo "Editable Synapse image is stale. Will rebuild."
                unset skip_docker_build
            fi
        fi
    fi

    if [ -z "$skip_docker_build" ]; then
        if [ -n "$use_editable_synapse" ]; then

            # Build a special image designed for use in development with editable
            # installs.
            $CONTAINER_RUNTIME build -t synapse-editable \
                -f "docker/editable.Dockerfile" .
            # Build a special image designed for use in development with editable
            # installs.
            $CONTAINER_RUNTIME build -t synapse-editable \
                -f "docker/editable.Dockerfile" .

            $CONTAINER_RUNTIME build -t synapse-workers-editable \
                --build-arg FROM=synapse-editable \
                -f "docker/Dockerfile-workers" .
            $CONTAINER_RUNTIME build -t synapse-workers-editable \
                --build-arg FROM=synapse-editable \
                -f "docker/Dockerfile-workers" .

            $CONTAINER_RUNTIME build -t complement-synapse-editable \
                --build-arg FROM=synapse-workers-editable \
                -f "docker/complement/Dockerfile" "docker/complement"
            $CONTAINER_RUNTIME build -t complement-synapse-editable \
                --build-arg FROM=synapse-workers-editable \
                -f "docker/complement/Dockerfile" "docker/complement"

            # Prepare the Rust module
            $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
            # Prepare the Rust module
            $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

        else

            # Build the base Synapse image from the local checkout
            echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
            $CONTAINER_RUNTIME build -t matrixdotorg/synapse \
                --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
                --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
                -f "docker/Dockerfile" .
            echo_if_github "::endgroup::"
            # Build the base Synapse image from the local checkout
            echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
            $CONTAINER_RUNTIME build -t matrixdotorg/synapse \
                --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
                --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
                -f "docker/Dockerfile" .
            echo_if_github "::endgroup::"

            # Build the workers docker image (from the base Synapse image we just built).
            echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
            $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
            echo_if_github "::endgroup::"
            # Build the workers docker image (from the base Synapse image we just built).
            echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
            $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
            echo_if_github "::endgroup::"

            # Build the unified Complement image (from the worker Synapse image we just built).
            echo_if_github "::group::Build Docker image: complement/Dockerfile"
            $CONTAINER_RUNTIME build -t complement-synapse \
                `# This is the tag we end up pushing to the registry (see` \
                `# .github/workflows/push_complement_image.yml) so let's just label it now` \
                `# so people can reference it by the same name locally.` \
                -t ghcr.io/element-hq/synapse/complement-synapse \
                -f "docker/complement/Dockerfile" "docker/complement"
            echo_if_github "::endgroup::"
            # Build the unified Complement image (from the worker Synapse image we just built).
            echo_if_github "::group::Build Docker image: complement/Dockerfile"
            $CONTAINER_RUNTIME build -t complement-synapse \
                `# This is the tag we end up pushing to the registry (see` \
                `# .github/workflows/push_complement_image.yml) so let's just label it now` \
                `# so people can reference it by the same name locally.` \
                -t ghcr.io/element-hq/synapse/complement-synapse \
                -f "docker/complement/Dockerfile" "docker/complement"
            echo_if_github "::endgroup::"

        fi

        echo "Docker images built."
    else
        echo "Skipping Docker image build as requested."
    fi
    fi

    test_packages=(
    if [ -n "$skip_complement_run" ]; then
        echo "Skipping Complement run as requested."
        exit
    fi

    export COMPLEMENT_BASE_IMAGE=complement-synapse
    if [ -n "$use_editable_synapse" ]; then
        export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
        export COMPLEMENT_HOST_MOUNTS="$editable_mount"
    fi

    extra_test_args=()

    test_packages=(
        ./tests/csapi
        ./tests
        ./tests/msc3874
@@ -229,104 +231,71 @@ main() {
        ./tests/msc4140
        ./tests/msc4155
        ./tests/msc4306
    )
    )

    # Export the list of test packages as a space-separated environment variable, so other
    # scripts can use it.
    export SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES="${test_packages[@]}"
    # Enable dirty runs, so tests will reuse the same container where possible.
    # This significantly speeds up tests, but increases the possibility of test pollution.
    export COMPLEMENT_ENABLE_DIRTY_RUNS=1

    export COMPLEMENT_BASE_IMAGE=complement-synapse
    if [ -n "$use_editable_synapse" ]; then
        export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
        export COMPLEMENT_HOST_MOUNTS="$editable_mount"
    fi
    # All environment variables starting with PASS_ will be shared.
    # (The prefix is stripped off before reaching the container.)
    export COMPLEMENT_SHARE_ENV_PREFIX=PASS_

    # Enable dirty runs, so tests will reuse the same container where possible.
    # This significantly speeds up tests, but increases the possibility of test pollution.
    export COMPLEMENT_ENABLE_DIRTY_RUNS=1
    # It takes longer than 10m to run the whole suite.
    extra_test_args+=("-timeout=60m")

    # All environment variables starting with PASS_ will be shared.
    # (The prefix is stripped off before reaching the container.)
    export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
    if [[ -n "$WORKERS" ]]; then
        # Use workers.
        export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true

    # * -count=1: Only run tests once, and disable caching for tests.
    # * -v: Output test logs, even if those tests pass.
    # * -tags=synapse_blacklist: Enable the `synapse_blacklist` build tag, which is
    #   necessary for `runtime.Synapse` checks/skips to work in the tests
    test_args=(
        -v
        -tags="synapse_blacklist"
        -count=1
    )
        # Pass through the workers defined. If none, it will be an empty string
        export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"

    # It takes longer than 10m to run the whole suite.
    test_args+=("-timeout=60m")
        # Workers can only use Postgres as a database.
        export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres

    if [[ -n "$WORKERS" ]]; then
        # Use workers.
        export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
        # And provide some more configuration to complement.

        # Pass through the workers defined. If none, it will be an empty string
        export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"

        # Workers can only use Postgres as a database.
        # It can take quite a while to spin up a worker-mode Synapse for the first
        # time (the main problem is that we start 14 python processes for each test,
        # and complement likes to do two of them in parallel).
        export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
    else
        export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
        if [[ -n "$POSTGRES" ]]; then
            export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres

        # And provide some more configuration to complement.

        # It can take quite a while to spin up a worker-mode Synapse for the first
        # time (the main problem is that we start 14 python processes for each test,
        # and complement likes to do two of them in parallel).
        export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
    else
        export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
        if [[ -n "$POSTGRES" ]]; then
            export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
        else
            export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
        fi
            export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
    fi

    if [[ -n "$ASYNCIO_REACTOR" ]]; then
        # Enable the Twisted asyncio reactor
        export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
    fi

    if [[ -n "$UNIX_SOCKETS" ]]; then
        # Enable full on Unix socket mode for Synapse, Redis and Postgresql
        export PASS_SYNAPSE_USE_UNIX_SOCKET=1
    fi

    if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
        # Set the log level to what is desired
        export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"

        # Allow logging sensitive things (currently SQL queries & parameters).
        # (This won't have any effect if we're not logging at DEBUG level overall.)
        # Since this is just a test suite, this is fine and won't reveal anyone's
        # personal information
        export PASS_SYNAPSE_LOG_SENSITIVE=1
    fi

    # Log a few more useful things for a developer attempting to debug something
    # particularly tricky.
    export PASS_SYNAPSE_LOG_TESTING=1

    if [ -n "$skip_complement_run" ]; then
        echo "Skipping Complement run as requested."
        return 0
    fi

    # Run the tests!
    echo "Running Complement with ${test_args[@]} $@ ${test_packages[@]}"
    cd "$COMPLEMENT_DIR"
    go test "${test_args[@]}" "$@" "${test_packages[@]}"
}

main "$@"
# For any non-zero exit code (indicating some sort of error happened), we want to exit
# with that code.
exit_code=$?
if [ $exit_code -ne 0 ]; then
    exit $exit_code
fi

if [[ -n "$ASYNCIO_REACTOR" ]]; then
    # Enable the Twisted asyncio reactor
    export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi

if [[ -n "$UNIX_SOCKETS" ]]; then
    # Enable full on Unix socket mode for Synapse, Redis and Postgresql
    export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
    # Set the log level to what is desired
    export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"

    # Allow logging sensitive things (currently SQL queries & parameters).
    # (This won't have any effect if we're not logging at DEBUG level overall.)
    # Since this is just a test suite, this is fine and won't reveal anyone's
    # personal information
    export PASS_SYNAPSE_LOG_SENSITIVE=1
fi

# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1

# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
|
||||
cd "$COMPLEMENT_DIR"
|
||||
|
||||
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
|
||||
@@ -291,12 +291,6 @@ def _prepare() -> None:
    synapse_repo.git.add("-u")
    subprocess.run("git diff --cached", shell=True)

    print(
        "Consider any upcoming platform deprecations that should be mentioned in the changelog. (e.g. upcoming Python, PostgreSQL or SQLite deprecations)"
    )
    print(
        "Platform deprecations should be mentioned at least 1 release prior to being unsupported."
    )
    if click.confirm("Edit changelog?", default=False):
        click.edit(filename="CHANGES.md")

@@ -26,7 +26,7 @@ import hashlib
import hmac
import logging
import sys
from typing import Any, Callable, Iterable, TextIO
from typing import Any, Callable

import requests
import yaml
@@ -244,7 +244,6 @@ def main() -> None:
    group.add_argument(
        "-c",
        "--config",
        action="append",
        type=argparse.FileType("r"),
        help="Path to server config file. Used to read in shared secret.",
    )
@@ -265,7 +264,7 @@ def main() -> None:

    config: dict[str, Any] | None = None
    if "config" in args and args.config:
        config = _read_config_files(args.config)
        config = yaml.safe_load(args.config)

    if args.shared_secret:
        secret = args.shared_secret
@@ -327,33 +326,6 @@ def main() -> None:
    )


# Adapted from synapse.config._base.
def _read_config_files(config_files: Iterable[TextIO]) -> dict[str, Any]:
    """Read the config files and shallowly merge them into a dict.

    Successive configurations are shallowly merged into ones provided earlier,
    i.e., entirely replacing top-level sections of the configuration.

    Args:
        config_files: A list of the config files to read

    Returns:
        The configuration dictionary.
    """
    specified_config = {}
    for config_file in config_files:
        yaml_config = yaml.safe_load(config_file)

        if not isinstance(yaml_config, dict):
            err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
            print(err % (config_file,))
            continue

        specified_config.update(yaml_config)

    return specified_config


def _read_file(file_path: Any, config_path: str) -> str:
    """Check the given file exists, and read it into a string

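A minimal sketch of the shallow-merge behaviour of `_read_config_files` above, using in-memory YAML documents rather than real files (the file contents here are made up for illustration). A top-level key in a later file replaces the earlier value wholesale; nested keys are not merged.

import io

import yaml

homeserver_yaml = io.StringIO("registration_shared_secret: abc\nserver_name: example.org\n")
override_yaml = io.StringIO("registration_shared_secret: xyz\n")

merged: dict = {}
for config_file in (homeserver_yaml, override_yaml):
    loaded = yaml.safe_load(config_file)
    if isinstance(loaded, dict):
        merged.update(loaded)

# The later file wins for the shared secret; other top-level keys survive.
assert merged == {"registration_shared_secret": "xyz", "server_name": "example.org"}
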
@@ -58,7 +58,6 @@ from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.delayed_events import DelayedEventsStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
@@ -108,7 +107,6 @@ logger = logging.getLogger("synapse_port_db")
BOOLEAN_COLUMNS = {
    "access_tokens": ["used"],
    "account_validity": ["email_sent"],
    "delayed_events": ["is_processed"],
    "device_lists_changes_in_room": ["converted_to_destinations"],
    "device_lists_outbound_pokes": ["sent"],
    "devices": ["hidden"],
@@ -274,7 +272,6 @@ class Store(
    RelationsWorkerStore,
    EventFederationWorkerStore,
    SlidingSyncStore,
    DelayedEventsStore,
):
    def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

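A quick sketch (not the actual porter logic) of why synapse_port_db keeps a BOOLEAN_COLUMNS map: SQLite stores booleans as 0/1 integers, while Postgres has a real boolean type, so flagged columns have to be cast row by row when porting. The helper below is illustrative only.

BOOLEAN_COLUMNS = {
    "devices": ["hidden"],
    "delayed_events": ["is_processed"],
}

def convert_row(table: str, columns: list[str], row: tuple) -> tuple:
    """Cast boolean-typed columns from int (SQLite) to bool (Postgres)."""
    bool_cols = set(BOOLEAN_COLUMNS.get(table, ()))
    return tuple(
        bool(value) if name in bool_cols and value is not None else value
        for name, value in zip(columns, row)
    )

# e.g. a `devices` row whose `hidden` flag was stored as 1 in SQLite:
assert convert_row("devices", ["device_id", "hidden"], ("DEV1", 1)) == ("DEV1", True)
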
@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING
from urllib.parse import urlencode

from pydantic import (
    AnyHttpUrl,
    BaseModel,
    ConfigDict,
    StrictBool,
@@ -146,13 +147,33 @@ class MasDelegatedAuth(BaseAuth):

    @property
    def _metadata_url(self) -> str:
        return (
            f"{str(self._config.endpoint).rstrip('/')}/.well-known/openid-configuration"
        return str(
            AnyHttpUrl.build(
                scheme=self._config.endpoint.scheme,
                username=self._config.endpoint.username,
                password=self._config.endpoint.password,
                host=self._config.endpoint.host or "",
                port=self._config.endpoint.port,
                path=".well-known/openid-configuration",
                query=None,
                fragment=None,
            )
        )

    @property
    def _introspection_endpoint(self) -> str:
        return f"{str(self._config.endpoint).rstrip('/')}/oauth2/introspect"
        return str(
            AnyHttpUrl.build(
                scheme=self._config.endpoint.scheme,
                username=self._config.endpoint.username,
                password=self._config.endpoint.password,
                host=self._config.endpoint.host or "",
                port=self._config.endpoint.port,
                path="oauth2/introspect",
                query=None,
                fragment=None,
            )
        )

    async def _load_metadata(self) -> ServerMetadata:
        response = await self._http_client.get_json(self._metadata_url)

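A small, self-contained illustration of the `AnyHttpUrl.build` pattern used above (pydantic v2 API; the endpoint value here is made up). Building the URL from components, rather than concatenating strings, keeps the scheme, credentials, host and port intact and avoids doubled or missing slashes.

from pydantic import AnyHttpUrl

endpoint = AnyHttpUrl("https://user:pass@mas.example.com:8443/")

well_known = AnyHttpUrl.build(
    scheme=endpoint.scheme,
    username=endpoint.username,
    password=endpoint.password,
    host=endpoint.host or "",
    port=endpoint.port,
    path=".well-known/openid-configuration",
)

# Expected (illustrative) output:
# https://user:pass@mas.example.com:8443/.well-known/openid-configuration
print(str(well_known))
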
@@ -272,6 +272,9 @@ class EventContentFields:
    M_TOPIC: Final = "m.topic"
    M_TEXT: Final = "m.text"

    # Event relations
    RELATIONS: Final = "m.relates_to"


class EventUnsignedContentFields:
    """Fields found inside the 'unsigned' data on events"""
@@ -307,10 +310,6 @@ class AccountDataTypes:
    MSC4155_INVITE_PERMISSION_CONFIG: Final = (
        "org.matrix.msc4155.invite_permission_config"
    )
    # MSC4380: Invite blocking
    MSC4380_INVITE_PERMISSION_CONFIG: Final = (
        "org.matrix.msc4380.invite_permission_config"
    )
    # Synapse-specific behaviour. See "Client-Server API Extensions" documentation
    # in Admin API for more information.
    SYNAPSE_ADMIN_CLIENT_CONFIG: Final = "io.element.synapse.admin_client_config"
@@ -364,3 +363,10 @@ class Direction(enum.Enum):
class ProfileFields:
    DISPLAYNAME: Final = "displayname"
    AVATAR_URL: Final = "avatar_url"


class MRelatesToFields:
    """Fields found inside m.relates_to content blocks."""

    EVENT_ID: Final = "event_id"
    REL_TYPE: Final = "rel_type"

@@ -137,7 +137,7 @@ class Codes(str, Enum):
    PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
    KEY_TOO_LARGE = "M_KEY_TOO_LARGE"

    # Part of MSC4155/MSC4380
    # Part of MSC4155
    INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"

    # Part of MSC4190

@@ -27,7 +27,6 @@ from synapse.config.ratelimiting import RatelimitSettings
from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util.clock import Clock
from synapse.util.duration import Duration
from synapse.util.wheel_timer import WheelTimer

if TYPE_CHECKING:
@@ -101,7 +100,7 @@ class Ratelimiter:
        # and doesn't affect correctness.
        self._timer: WheelTimer[Hashable] = WheelTimer()

        self.clock.looping_call(self._prune_message_counts, Duration(seconds=15))
        self.clock.looping_call(self._prune_message_counts, 15 * 1000)

    def _get_key(self, requester: Requester | None, key: Hashable | None) -> Hashable:
        """Use the requester's MXID as a fallback key if no key is provided."""

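The diffs in this PR replace raw millisecond integers (e.g. `15 * 1000`) with a `Duration` value object. The following is a minimal sketch of such a type, inferred from the call sites visible here (`Duration(minutes=5)`, `.as_secs()`, `.as_millis()`); the real `synapse.util.duration.Duration` may differ in detail.

class Duration:
    """An explicit span of time, avoiding unit confusion at call sites."""

    def __init__(self, *, milliseconds=0.0, seconds=0.0, minutes=0.0, hours=0.0, days=0.0):
        # Normalise everything to milliseconds internally.
        self._millis = milliseconds + 1000.0 * (
            seconds + 60.0 * (minutes + 60.0 * (hours + 24.0 * days))
        )

    def as_millis(self) -> float:
        return self._millis

    def as_secs(self) -> float:
        return self._millis / 1000.0

# `Duration(minutes=5)` reads unambiguously where `5 * 60 * 1000` forced the reader
# to check whether the API wanted seconds or milliseconds.
assert Duration(minutes=5).as_millis() == 300_000
assert Duration(seconds=15).as_secs() == 15
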
@@ -450,8 +450,7 @@ async def start(
    await _base.start(hs, freeze=freeze)

    # TODO: Feels like this should be moved somewhere else.
    for db in hs.get_datastores().databases:
        db.updates.start_doing_background_updates()
    hs.get_datastores().main.db_pool.updates.start_doing_background_updates()


def start_reactor(

@@ -30,20 +30,24 @@ from twisted.internet import defer

from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import JsonDict
from synapse.util.duration import Duration
from synapse.util.constants import (
    MILLISECONDS_PER_SECOND,
    ONE_HOUR_SECONDS,
    ONE_MINUTE_SECONDS,
)

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger("synapse.app.homeserver")

INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5)
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
"""
We wait 5 minutes to send the first set of stats as the server can be quite busy the
first few minutes
"""

PHONE_HOME_INTERVAL = Duration(hours=3)
PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
"""
Phone home stats are sent every 3 hours
"""
@@ -218,13 +222,13 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
        # table will decrease
        clock.looping_call(
            hs.get_datastores().main.generate_user_daily_visits,
            Duration(minutes=5),
            5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
        )

        # monthly active user limiting functionality
        clock.looping_call(
            hs.get_datastores().main.reap_monthly_active_users,
            Duration(hours=1),
            ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
        )
        hs.get_datastores().main.reap_monthly_active_users()
@@ -263,14 +267,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:

    if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
        generate_monthly_active_users()
        clock.looping_call(generate_monthly_active_users, Duration(minutes=5))
        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

    if hs.config.metrics.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(
            phone_stats_home,
            PHONE_HOME_INTERVAL,
            PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
            hs,
            stats,
        )
@@ -278,14 +282,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
    # We need to defer this init for the cases that we daemonize
    # otherwise the process ID we get is that of the non-daemon process
    clock.call_later(
        Duration(seconds=0),
        0,
        performance_stats_init,
    )

    # We wait 5 minutes to send the first set of stats as the server can
    # be quite busy the first few minutes
    clock.call_later(
        INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME,
        INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS,
        phone_stats_home,
        hs,
        stats,

@@ -65,6 +65,8 @@ from typing import (
    Sequence,
)

from twisted.internet.interfaces import IDelayedCall

from synapse.appservice import (
    ApplicationService,
    ApplicationServiceState,
@@ -76,8 +78,7 @@ from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.storage.databases.main import DataStore
from synapse.types import DeviceListUpdates, JsonMapping
from synapse.util.clock import Clock, DelayedCallWrapper
from synapse.util.duration import Duration
from synapse.util.clock import Clock

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -502,10 +503,10 @@ class _Recoverer:
        self.service = service
        self.callback = callback
        self.backoff_counter = 1
        self.scheduled_recovery: DelayedCallWrapper | None = None
        self.scheduled_recovery: IDelayedCall | None = None

    def recover(self) -> None:
        delay = Duration(seconds=2**self.backoff_counter)
        delay = 2**self.backoff_counter
        logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
        self.scheduled_recovery = self.clock.call_later(
            delay,

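A quick illustration (not Synapse code) of the schedule produced by the `2 ** backoff_counter` recovery delay above: each failed attempt doubles the wait before the next retry.

def backoff_schedule(attempts: int) -> list[int]:
    """Return the delays, in seconds, for the first `attempts` recovery attempts."""
    return [2**counter for counter in range(1, attempts + 1)]

# 2s, 4s, 8s, 16s, 32s - roughly a minute of cumulative waiting after five failures.
assert backoff_schedule(5) == [2, 4, 8, 16, 32]
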
@@ -672,8 +672,7 @@ class RootConfig:
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
            " may specify directories containing *.yaml files."
            " Top-level keys in later files overwrite ones in earlier files.",
            " may specify directories containing *.yaml files.",
        )
        parser.add_argument(
            "--no-secrets-in-config",

@@ -438,9 +438,6 @@ class ExperimentalConfig(Config):
        # previously calculated push actions.
        self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)

        # MSC2666: Query mutual rooms between two users.
        self.msc2666_enabled: bool = experimental.get("msc2666_enabled", False)

        # MSC2815 (allow room moderators to view redacted event content)
        self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)

@@ -597,5 +594,5 @@ class ExperimentalConfig(Config):
        # (and MSC4308: Thread Subscriptions extension to Sliding Sync)
        self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False)

        # MSC4380: Invite blocking
        self.msc4380_enabled: bool = experimental.get("msc4380_enabled", False)
        # MSC4360: Threads Extension to Sliding Sync
        self.msc4360_enabled: bool = experimental.get("msc4360_enabled", False)

@@ -75,7 +75,6 @@ from synapse.types import JsonDict, StrCollection, UserID, get_domain_from_id
from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.duration import Duration
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
@@ -133,7 +132,7 @@ class FederationClient(FederationBase):
        super().__init__(hs)

        self.pdu_destination_tried: dict[str, dict[str, int]] = {}
        self._clock.looping_call(self._clear_tried_cache, Duration(minutes=1))
        self._clock.looping_call(self._clear_tried_cache, 60 * 1000)
        self.state = hs.get_state_handler()
        self.transport_layer = hs.get_federation_transport_client()

@@ -89,7 +89,6 @@ from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.duration import Duration
from synapse.util.stringutils import parse_server_name

if TYPE_CHECKING:
@@ -227,7 +226,7 @@ class FederationServer(FederationBase):
            )

            # We pause a bit so that we don't start handling all rooms at once.
            await self._clock.sleep(Duration(seconds=random.uniform(0, 0.1)))
            await self._clock.sleep(random.uniform(0, 0.1))

    async def on_backfill_request(
        self, origin: str, room_id: str, versions: list[str], limit: int
@@ -302,9 +301,7 @@ class FederationServer(FederationBase):
        # Start a periodic check for old staged events. This is to handle
        # the case where locks time out, e.g. if another process gets killed
        # without dropping its locks.
        self._clock.looping_call(
            self._handle_old_staged_events, Duration(minutes=1)
        )
        self._clock.looping_call(self._handle_old_staged_events, 60 * 1000)

        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.

@@ -53,7 +53,6 @@ from synapse.federation.sender import AbstractFederationSender, FederationSender
from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
from synapse.replication.tcp.streams.federation import FederationStream
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
from synapse.util.duration import Duration
from synapse.util.metrics import Measure

from .units import Edu
@@ -138,7 +137,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
        assert isinstance(queue, Sized)
        register(queue_name, queue=queue)

        self.clock.looping_call(self._clear_queue, Duration(seconds=30))
        self.clock.looping_call(self._clear_queue, 30 * 1000)

    def shutdown(self) -> None:
        """Stops this federation sender instance from sending further transactions."""

@@ -174,7 +174,6 @@ from synapse.types import (
    get_domain_from_id,
)
from synapse.util.clock import Clock
from synapse.util.duration import Duration
from synapse.util.metrics import Measure
from synapse.util.retryutils import filter_destinations_by_retry_limiter

@@ -219,12 +218,12 @@ transaction_queue_pending_edus_gauge = LaterGauge(
# Please note that rate limiting still applies, so while the loop is
# executed every X seconds the destinations may not be woken up because
# they are being rate limited following previous attempt failures.
WAKEUP_RETRY_PERIOD = Duration(minutes=1)
WAKEUP_RETRY_PERIOD_SEC = 60

# Time to wait in between waking up each destination, i.e. one destination
# Time (in s) to wait in between waking up each destination, i.e. one destination
# will be woken up every <x> seconds until we have woken every destination
# that has outstanding catch-up.
WAKEUP_INTERVAL_BETWEEN_DESTINATIONS = Duration(seconds=5)
WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5


class AbstractFederationSender(metaclass=abc.ABCMeta):
@@ -380,7 +379,7 @@ class _DestinationWakeupQueue:

            queue.attempt_new_transaction()

            await self.clock.sleep(Duration(seconds=current_sleep_seconds))
            await self.clock.sleep(current_sleep_seconds)

            if not self.queue:
                break
@@ -469,7 +468,7 @@ class FederationSender(AbstractFederationSender):
        # Regularly wake up destinations that have outstanding PDUs to be caught up
        self.clock.looping_call_now(
            self.hs.run_as_background_process,
            WAKEUP_RETRY_PERIOD,
            WAKEUP_RETRY_PERIOD_SEC * 1000.0,
            "wake_destinations_needing_catchup",
            self._wake_destinations_needing_catchup,
        )
@@ -1162,4 +1161,4 @@ class FederationSender(AbstractFederationSender):
            last_processed,
        )
        self.wake_destination(destination)
        await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS)
        await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC)

@@ -41,7 +41,6 @@ from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import SERVER_NAME_LABEL, sent_transactions_counter
from synapse.types import JsonDict, ReadReceipt
@@ -187,8 +186,7 @@ class PerDestinationQueue:
        self._transaction_manager.shutdown()
        try:
            if self.active_transmission_loop is not None:
                with PreserveLoggingContext():
                    self.active_transmission_loop.cancel()
            self.active_transmission_loop.cancel()
        except Exception:
            pass

@@ -28,7 +28,6 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
from synapse.types import UserID
from synapse.util import stringutils
from synapse.util.async_helpers import delay_cancellation
from synapse.util.duration import Duration

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -74,7 +73,7 @@ class AccountValidityHandler:

        # Check the renewal emails to send and send them every 30min.
        if hs.config.worker.run_background_tasks:
            self.clock.looping_call(self._send_renewal_emails, Duration(minutes=30))
            self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

    async def is_user_expired(self, user_id: str) -> bool:
        """Checks if a user has expired against third-party modules.

@@ -74,7 +74,6 @@ from synapse.storage.databases.main.registration import (
from synapse.types import JsonDict, Requester, StrCollection, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.duration import Duration
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -243,7 +242,7 @@ class AuthHandler:
        if hs.config.worker.run_background_tasks:
            self._clock.looping_call(
                run_as_background_process,
                Duration(minutes=5),
                5 * 60 * 1000,
                "expire_old_sessions",
                self.server_name,
                self._expire_old_sessions,

@@ -21,7 +21,6 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import ShadowBanError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
@@ -30,9 +29,11 @@ from synapse.replication.http.delayed_events import (
)
from synapse.storage.databases.main.delayed_events import (
    DelayedEventDetails,
    DelayID,
    EventType,
    StateKey,
    Timestamp,
    UserLocalpart,
)
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
@@ -42,7 +43,6 @@ from synapse.types import (
    UserID,
    create_requester,
)
from synapse.util.duration import Duration
from synapse.util.events import generate_fake_event_id
from synapse.util.metrics import Measure
from synapse.util.sentinel import Sentinel
@@ -93,22 +93,20 @@ class DelayedEventsHandler:
        # Kick off again (without blocking) to catch any missed notifications
        # that may have fired before the callback was added.
        self._clock.call_later(
            Duration(seconds=0),
            0,
            self.notify_new_event,
        )

        # Now process any delayed events that are due to be sent.
        #
        # We set `reprocess_events` to True in case any events had been
        # marked as processed, but had not yet actually been sent,
        # before the homeserver stopped.
        #
        # Delayed events that are already marked as processed on startup might not have been
        # sent properly on the last run of the server, so unmark them to send them again.
        # Caveat: this will double-send delayed events that successfully persisted, but failed
        # to be removed from the DB table of delayed events.
        # TODO: To avoid double-sending, scan the timeline to find which of these events were
        # already sent. To do so, must store delay_ids in sent events to retrieve them later.
        await self._store.unprocess_delayed_events()

        events, next_send_ts = await self._store.process_timeout_delayed_events(
            self._get_current_ts(), reprocess_events=True
            self._get_current_ts()
        )

        if next_send_ts:
@@ -401,68 +399,96 @@ class DelayedEventsHandler:
        if self._next_send_ts_changed(next_send_ts):
            self._schedule_next_at(next_send_ts)

    async def cancel(self, request: SynapseRequest, delay_id: str) -> None:
    async def cancel(self, requester: Requester, delay_id: str) -> None:
        """
        Cancels the scheduled delivery of the matching delayed event.

        Args:
            requester: The owner of the delayed event to act on.
            delay_id: The ID of the delayed event to act on.

        Raises:
            NotFoundError: if no matching delayed event could be found.
        """
        assert self._is_master
        await self._delayed_event_mgmt_ratelimiter.ratelimit(
            None, request.getClientAddress().host
            requester,
            (requester.user.to_string(), requester.device_id),
        )
        await make_deferred_yieldable(self._initialized_from_db)

        next_send_ts = await self._store.cancel_delayed_event(delay_id)
        next_send_ts = await self._store.cancel_delayed_event(
            delay_id=delay_id,
            user_localpart=requester.user.localpart,
        )

        if self._next_send_ts_changed(next_send_ts):
            self._schedule_next_at_or_none(next_send_ts)

    async def restart(self, request: SynapseRequest, delay_id: str) -> None:
    async def restart(self, requester: Requester, delay_id: str) -> None:
        """
        Restarts the scheduled delivery of the matching delayed event.

        Raises:
            NotFoundError: if no matching delayed event could be found.
        """
        await self._delayed_event_mgmt_ratelimiter.ratelimit(
            None, request.getClientAddress().host
        )

        # Note: We don't need to wait on `self._initialized_from_db` here as the
        # events that it deals with are already marked as processed.
        #
        # `restart_delayed_events` will skip over such events entirely.

        next_send_ts = await self._store.restart_delayed_event(
            delay_id, self._get_current_ts()
        )

        # Only the main process handles sending delayed events.
        if self._is_master:
            if self._next_send_ts_changed(next_send_ts):
                self._schedule_next_at(next_send_ts)

    async def send(self, request: SynapseRequest, delay_id: str) -> None:
        """
        Immediately sends the matching delayed event, instead of waiting for its scheduled delivery.
        Args:
            requester: The owner of the delayed event to act on.
            delay_id: The ID of the delayed event to act on.

        Raises:
            NotFoundError: if no matching delayed event could be found.
        """
        assert self._is_master
        await self._delayed_event_mgmt_ratelimiter.ratelimit(
            None, request.getClientAddress().host
            requester,
            (requester.user.to_string(), requester.device_id),
        )
        await make_deferred_yieldable(self._initialized_from_db)

        event, next_send_ts = await self._store.process_target_delayed_event(delay_id)
        next_send_ts = await self._store.restart_delayed_event(
            delay_id=delay_id,
            user_localpart=requester.user.localpart,
            current_ts=self._get_current_ts(),
        )

        if self._next_send_ts_changed(next_send_ts):
            self._schedule_next_at(next_send_ts)

    async def send(self, requester: Requester, delay_id: str) -> None:
        """
        Immediately sends the matching delayed event, instead of waiting for its scheduled delivery.

        Args:
            requester: The owner of the delayed event to act on.
            delay_id: The ID of the delayed event to act on.

        Raises:
            NotFoundError: if no matching delayed event could be found.
        """
        assert self._is_master
        # Use standard request limiter for sending delayed events on-demand,
        # as an on-demand send is similar to sending a regular event.
        await self._request_ratelimiter.ratelimit(requester)
        await make_deferred_yieldable(self._initialized_from_db)

        event, next_send_ts = await self._store.process_target_delayed_event(
            delay_id=delay_id,
            user_localpart=requester.user.localpart,
        )

        if self._next_send_ts_changed(next_send_ts):
            self._schedule_next_at_or_none(next_send_ts)

        await self._send_event(event)
        await self._send_event(
            DelayedEventDetails(
                delay_id=DelayID(delay_id),
                user_localpart=UserLocalpart(requester.user.localpart),
                room_id=event.room_id,
                type=event.type,
                state_key=event.state_key,
                origin_server_ts=event.origin_server_ts,
                content=event.content,
                device_id=event.device_id,
            )
        )

    async def _send_on_timeout(self) -> None:
        self._next_delayed_event_call = None
@@ -509,17 +535,17 @@ class DelayedEventsHandler:

    def _schedule_next_at(self, next_send_ts: Timestamp) -> None:
        delay = next_send_ts - self._get_current_ts()
        delay_duration = Duration(milliseconds=max(delay, 0))
        delay_sec = delay / 1000 if delay > 0 else 0

        if self._next_delayed_event_call is None:
            self._next_delayed_event_call = self._clock.call_later(
                delay_duration,
                delay_sec,
                self.hs.run_as_background_process,
                "_send_on_timeout",
                self._send_on_timeout,
            )
        else:
            self._next_delayed_event_call.reset(delay_duration.as_secs())
            self._next_delayed_event_call.reset(delay_sec)

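A standalone sketch (assumed names, not Synapse's Clock API) of the create-or-reset pattern `_schedule_next_at` uses above: keep at most one pending timer, clamp negative delays to zero, and reset the existing timer instead of stacking a second one when the target timestamp moves.

import threading
import time


class NextSendScheduler:
    def __init__(self, on_timeout) -> None:
        self._on_timeout = on_timeout
        self._timer: threading.Timer | None = None

    def schedule_next_at(self, next_send_ts_ms: int) -> None:
        # Negative delays (a send time already in the past) become "fire now".
        delay_sec = max(next_send_ts_ms - time.time() * 1000, 0) / 1000
        if self._timer is not None:
            self._timer.cancel()  # stand-in for `reset()`: drop the old deadline
        self._timer = threading.Timer(delay_sec, self._on_timeout)
        self._timer.start()


scheduler = NextSendScheduler(lambda: print("send due delayed events"))
scheduler.schedule_next_at(int(time.time() * 1000) + 50)
scheduler.schedule_next_at(0)  # already due: fires (almost) immediately
time.sleep(0.2)
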
    async def get_all_for_user(self, requester: Requester) -> list[JsonDict]:
        """Return all pending delayed events requested by the given user."""
@@ -585,7 +611,9 @@ class DelayedEventsHandler:
        finally:
            # TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure
            try:
                await self._store.delete_processed_delayed_event(event.delay_id)
                await self._store.delete_processed_delayed_event(
                    event.delay_id, event.user_localpart
                )
            except Exception:
                logger.exception("Failed to delete processed delayed event")

@@ -71,7 +71,6 @@ from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.cancellation import cancellable
from synapse.util.duration import Duration
from synapse.util.metrics import measure_func
from synapse.util.retryutils import (
    NotRetryingDestination,
@@ -86,7 +85,7 @@ logger = logging.getLogger(__name__)

DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages"
MAX_DEVICE_DISPLAY_NAME_LEN = 100
DELETE_STALE_DEVICES_INTERVAL = Duration(days=1)
DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000


def _check_device_name_length(name: str | None) -> None:
@@ -187,7 +186,7 @@ class DeviceHandler:
        ):
            self.clock.looping_call(
                self.hs.run_as_background_process,
                DELETE_STALE_DEVICES_INTERVAL,
                DELETE_STALE_DEVICES_INTERVAL_MS,
                desc="delete_stale_devices",
                func=self._delete_stale_devices,
            )
@@ -916,7 +915,7 @@ class DeviceHandler:
        )

    DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000
    DEVICE_MSGS_DELETE_SLEEP = Duration(milliseconds=100)
    DEVICE_MSGS_DELETE_SLEEP_MS = 100

    async def _delete_device_messages(
        self,
@@ -942,7 +941,9 @@ class DeviceHandler:
            if from_stream_id is None:
                return TaskStatus.COMPLETE, None, None

            await self.clock.sleep(DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP)
            await self.clock.sleep(
                DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0
            )


class DeviceWriterHandler(DeviceHandler):
@@ -1468,7 +1469,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
        self._resync_retry_lock = Lock()
        self.clock.looping_call(
            self.hs.run_as_background_process,
            Duration(seconds=30),
            30 * 1000,
            func=self._maybe_retry_device_resync,
            desc="_maybe_retry_device_resync",
        )

@@ -321,7 +321,16 @@ class DirectoryHandler:
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room Alias is not hosted on this homeserver")

        return await self.get_association(room_alias)
        result = await self.get_association_from_room_alias(room_alias)

        if result is not None:
            return {"room_id": result.room_id, "servers": result.servers}
        else:
            raise SynapseError(
                404,
                "Room alias %r not found" % (room_alias.to_string(),),
                Codes.NOT_FOUND,
            )

    async def _update_canonical_alias(
        self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias

@@ -46,7 +46,6 @@ from synapse.types import (
)
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.cancellation import cancellable
from synapse.util.duration import Duration
from synapse.util.json import json_decoder
from synapse.util.retryutils import (
    NotRetryingDestination,
@@ -1635,7 +1634,7 @@ class E2eKeysHandler:
            # matrix.org has about 15M users in the e2e_one_time_keys_json table
            # (comprising 20M devices). We want this to take about a week, so we need
            # to do about one batch of 100 users every 4 seconds.
            await self.clock.sleep(Duration(seconds=4))
            await self.clock.sleep(4)


def _check_cross_signing_key(

@@ -72,7 +72,6 @@ from synapse.storage.invite_rule import InviteRule
from synapse.types import JsonDict, StrCollection, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.duration import Duration
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server

@@ -1973,9 +1972,7 @@ class FederationHandler:
                    logger.warning(
                        "%s; waiting for %d ms...", e, e.retry_after_ms
                    )
                    await self.clock.sleep(
                        Duration(milliseconds=e.retry_after_ms)
                    )
                    await self.clock.sleep(e.retry_after_ms / 1000)

                    # Success, no need to try the rest of the destinations.
                    break

@@ -91,7 +91,6 @@ from synapse.types import (
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.duration import Duration
from synapse.util.iterutils import batch_iter, partition, sorted_topologically
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -1803,7 +1802,7 @@ class FederationEventHandler:
                # the reactor. For large rooms let's yield to the reactor
                # occasionally to ensure we don't block other work.
                if (i + 1) % 1000 == 0:
                    await self._clock.sleep(Duration(seconds=0))
                    await self._clock.sleep(0)

            # Also persist the new event in batches for similar reasons as above.
            for batch in batch_iter(events_and_contexts_to_persist, 1000):

@@ -83,7 +83,6 @@ from synapse.types.state import StateFilter
from synapse.util import log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.duration import Duration
from synapse.util.json import json_decoder, json_encoder
from synapse.util.metrics import measure_func
from synapse.visibility import get_effective_room_visibility_from_state
@@ -434,11 +433,14 @@ class MessageHandler:

        # Figure out how many seconds we need to wait before expiring the event.
        now_ms = self.clock.time_msec()
        delay = Duration(milliseconds=max(expiry_ts - now_ms, 0))
        delay = (expiry_ts - now_ms) / 1000

        logger.info(
            "Scheduling expiry for event %s in %.3fs", event_id, delay.as_secs()
        )
        # callLater doesn't support negative delays, so trim the delay to 0 in that case.
        if delay < 0:
            delay = 0

        logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)

        self._scheduled_expiry = self.clock.call_later(
            delay,
@@ -549,7 +551,7 @@ class EventCreationHandler:
                "send_dummy_events_to_fill_extremities",
                self._send_dummy_events_to_fill_extremities,
            ),
            Duration(minutes=5),
            5 * 60 * 1000,
        )

        self._message_handler = hs.get_message_handler()
@@ -1010,7 +1012,7 @@ class EventCreationHandler:

        if not ignore_shadow_ban and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        room_version = None
@@ -1513,7 +1515,7 @@ class EventCreationHandler:
            and requester.shadow_banned
        ):
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        if event.is_state():
@@ -1955,12 +1957,6 @@ class EventCreationHandler:
        room_alias_str = event.content.get("alias", None)
        directory_handler = self.hs.get_directory_handler()
        if room_alias_str and room_alias_str != original_alias:
            if not isinstance(room_alias_str, str):
                raise SynapseError(
                    400,
                    "The alias must be of type string.",
                    Codes.INVALID_PARAM,
                )
            await self._validate_canonical_alias(
                directory_handler, room_alias_str, event.room_id
            )
@@ -1984,12 +1980,6 @@ class EventCreationHandler:
        new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
        if new_alt_aliases:
            for alias_str in new_alt_aliases:
                if not isinstance(alias_str, str):
                    raise SynapseError(
                        400,
                        "Each alt_alias must be of type string.",
                        Codes.INVALID_PARAM,
                    )
                await self._validate_canonical_alias(
                    directory_handler, alias_str, event.room_id
                )

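A tiny, self-contained version (illustrative only) of the expiry-delay computation above: the wait is derived from a millisecond timestamp difference and clamped to zero, since Twisted's callLater rejects negative delays when the expiry time has already passed.

def expiry_delay_seconds(expiry_ts_ms: int, now_ms: int) -> float:
    """Seconds to wait before expiring an event; never negative."""
    return max(expiry_ts_ms - now_ms, 0) / 1000

assert expiry_delay_seconds(10_500, 10_000) == 0.5   # expires half a second from now
assert expiry_delay_seconds(9_000, 10_000) == 0.0    # already expired: fire at once
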
@@ -42,7 +42,6 @@ from synapse.types import (
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.duration import Duration
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:
@@ -117,7 +116,7 @@ class PaginationHandler:

            self.clock.looping_call(
                self.hs.run_as_background_process,
                Duration(milliseconds=job.interval),
                job.interval,
                "purge_history_for_rooms_in_range",
                self.purge_history_for_rooms_in_range,
                job.shortest_max_lifetime,

@@ -121,7 +121,6 @@ from synapse.types import (
    get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
from synapse.util.duration import Duration
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer

@@ -204,7 +203,7 @@ EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000

# Delay before a worker tells the presence handler that a user has stopped
# syncing.
UPDATE_SYNCING_USERS = Duration(seconds=10)
UPDATE_SYNCING_USERS_MS = 10 * 1000

assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER

@@ -529,7 +528,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
        self._set_state_client = ReplicationPresenceSetState.make_client(hs)

        self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS)
        self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS)

        hs.register_async_shutdown_handler(
            phase="before",
@@ -582,7 +581,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        for (user_id, device_id), last_sync_ms in list(
            self._user_devices_going_offline.items()
        ):
            if now - last_sync_ms > UPDATE_SYNCING_USERS.as_millis():
            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
                self._user_devices_going_offline.pop((user_id, device_id), None)
                self.send_user_sync(user_id, device_id, False, last_sync_ms)

@@ -862,20 +861,20 @@ class PresenceHandler(BasePresenceHandler):
        # The initial delay is to allow disconnected clients a chance to
        # reconnect before we treat them as offline.
        self.clock.call_later(
            Duration(seconds=30),
            30,
            self.clock.looping_call,
            self._handle_timeouts,
            Duration(seconds=5),
            5000,
        )

        # Presence information is persisted, whether or not it is being tracked
        # internally.
        if self._presence_enabled:
            self.clock.call_later(
                Duration(minutes=1),
                60,
                self.clock.looping_call,
                self._persist_unpersisted_changes,
                Duration(minutes=1),
                60 * 1000,
            )

        presence_wheel_timer_size_gauge.register_hook(
@@ -2431,7 +2430,7 @@ class PresenceFederationQueue:
    _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000

    # How often to check if we can expire entries from the queue.
    _CLEAR_ITEMS_EVERY_MS = Duration(minutes=1)
    _CLEAR_ITEMS_EVERY_MS = 60 * 1000

    def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler):
        self._clock = hs.get_clock()

@@ -34,7 +34,6 @@ from synapse.api.errors import (
from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester
from synapse.util.caches.descriptors import cached
from synapse.util.duration import Duration
from synapse.util.stringutils import parse_and_validate_mxc_uri

if TYPE_CHECKING:
@@ -584,7 +583,7 @@ class ProfileHandler:
        # Do not actually update the room state for shadow-banned users.
        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            return

        room_ids = await self.store.get_rooms_for_user(target_user.to_string())

@@ -105,8 +105,6 @@ class RelationsHandler:
    ) -> JsonDict:
        """Get related events of an event, ordered by topological ordering.

        TODO Accept a PaginationConfig instead of individual pagination parameters.

        Args:
            requester: The user requesting the relations.
            event_id: Fetch events that relate to this event ID.

@@ -92,7 +92,6 @@ from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.duration import Duration
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client
@@ -1180,7 +1179,7 @@ class RoomCreationHandler:

        if (invite_list or invite_3pid_list) and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))

            # Allow the request to go through, but remove any associated invites.
            invite_3pid_list = []

@@ -66,7 +66,6 @@ from synapse.types import (
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
from synapse.util.duration import Duration

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -643,7 +642,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

        if action == Membership.INVITE and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        key = (room_id,)
@@ -1648,7 +1647,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        # We need to rate limit *before* we send out any 3PID invites, so we
@@ -2191,7 +2190,7 @@ class RoomForgetterHandler(StateDeltasHandler):

        # We kick this off to pick up outstanding work from before the last restart.
        self._clock.call_later(
            Duration(seconds=0),
            0,
            self.notify_new_event,
        )
@@ -2233,7 +2232,7 @@ class RoomForgetterHandler(StateDeltasHandler):
                #
                # We wait for a short time so that we don't tight-loop just
                # keeping the table up to date.
                await self._clock.sleep(Duration(milliseconds=500))
                await self._clock.sleep(0.5)

                self.pos = self._store.get_room_max_stream_ordering()
                await self._store.update_room_forgetter_stream_pos(self.pos)

@@ -305,6 +305,7 @@ class SlidingSyncHandler:
            # account data, read receipts, typing indicators, to-device messages, etc).
            actual_room_ids=set(relevant_room_map.keys()),
            actual_room_response_map=rooms,
            room_membership_for_user_at_to_token_map=room_membership_for_user_map,
            from_token=from_token,
            to_token=to_token,
        )
@@ -761,6 +762,8 @@ class SlidingSyncHandler:
                != Membership.JOIN,
                filter_send_to_client=True,
            )
            # TODO: Filter out `EventTypes.CallInvite` in public rooms,
            # see https://github.com/element-hq/synapse/issues/17359

            # TODO: Handle timeline gaps (`get_timeline_gaps()`)

@@ -14,6 +14,7 @@

import itertools
import logging
from collections import defaultdict
from typing import (
    TYPE_CHECKING,
    AbstractSet,
@@ -26,16 +27,28 @@ from typing import (

from typing_extensions import TypeAlias, assert_never

from synapse.api.constants import AccountDataTypes, EduTypes
from synapse.api.constants import (
    AccountDataTypes,
    EduTypes,
    EventContentFields,
    Membership,
    MRelatesToFields,
    RelationTypes,
)
from synapse.events import EventBase
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.sliding_sync.room_lists import RoomsForUserType
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.receipts import ReceiptInRoom
from synapse.storage.databases.main.relations import ThreadUpdateInfo
from synapse.types import (
    DeviceListUpdates,
    JsonMapping,
    MultiWriterStreamToken,
    RoomStreamToken,
    SlidingSyncStreamToken,
    StrCollection,
    StreamKeyType,
    StreamToken,
    ThreadSubscriptionsToken,
)
@@ -51,6 +64,7 @@ from synapse.util.async_helpers import (
    concurrently_execute,
    gather_optional_coroutines,
)
from synapse.visibility import filter_events_for_client

_ThreadSubscription: TypeAlias = (
    SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadSubscription
@@ -58,6 +72,7 @@ _ThreadSubscription: TypeAlias = (
_ThreadUnsubscription: TypeAlias = (
    SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadUnsubscription
)
_ThreadUpdate: TypeAlias = SlidingSyncResult.Extensions.ThreadsExtension.ThreadUpdate

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -73,7 +88,10 @@ class SlidingSyncExtensionHandler:
        self.event_sources = hs.get_event_sources()
        self.device_handler = hs.get_device_handler()
        self.push_rules_handler = hs.get_push_rules_handler()
        self.relations_handler = hs.get_relations_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._enable_thread_subscriptions = hs.config.experimental.msc4306_enabled
        self._enable_threads_ext = hs.config.experimental.msc4360_enabled

    @trace
    async def get_extensions_response(
@@ -84,6 +102,7 @@ class SlidingSyncExtensionHandler:
        actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
        actual_room_ids: set[str],
        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
        room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType],
        to_token: StreamToken,
        from_token: SlidingSyncStreamToken | None,
    ) -> SlidingSyncResult.Extensions:
@@ -99,6 +118,8 @@ class SlidingSyncExtensionHandler:
            actual_room_ids: The actual room IDs in the Sliding Sync response.
            actual_room_response_map: A map of room ID to room results in the
                Sliding Sync response.
            room_membership_for_user_at_to_token_map: A map of room ID to the membership
                information for the user in the room at the time of `to_token`.
            to_token: The latest point in the stream to sync up to.
            from_token: The point in the stream to sync from.
        """
@@ -174,6 +195,18 @@ class SlidingSyncExtensionHandler:
                from_token=from_token,
            )

        threads_coro = None
        if sync_config.extensions.threads is not None and self._enable_threads_ext:
            threads_coro = self.get_threads_extension_response(
                sync_config=sync_config,
                threads_request=sync_config.extensions.threads,
                actual_room_ids=actual_room_ids,
                actual_room_response_map=actual_room_response_map,
                room_membership_for_user_at_to_token_map=room_membership_for_user_at_to_token_map,
                to_token=to_token,
                from_token=from_token,
            )

        (
            to_device_response,
            e2ee_response,
@@ -181,6 +214,7 @@ class SlidingSyncExtensionHandler:
            receipts_response,
            typing_response,
            thread_subs_response,
            threads_response,
        ) = await gather_optional_coroutines(
            to_device_coro,
            e2ee_coro,
@@ -188,6 +222,7 @@ class SlidingSyncExtensionHandler:
            receipts_coro,
            typing_coro,
            thread_subs_coro,
            threads_coro,
        )

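A reduced sketch of the "optional coroutine" pattern used above (an assumed helper, not Synapse's `gather_optional_coroutines`): each extension contributes either a coroutine or None, everything non-None runs concurrently, and disabled extensions simply yield None in the result tuple.

import asyncio


async def gather_optional(*coros):
    # Run the non-None coroutines concurrently, then re-slot results so the
    # output tuple lines up positionally with the input.
    results = await asyncio.gather(*(c for c in coros if c is not None))
    iterator = iter(results)
    return tuple(None if c is None else next(iterator) for c in coros)


async def main() -> None:
    async def typing() -> str:
        return "typing-response"

    threads_enabled = False
    threads = None  # extension disabled: no coroutine is created at all
    if threads_enabled:
        threads = typing()

    typing_response, threads_response = await gather_optional(typing(), threads)
    assert typing_response == "typing-response" and threads_response is None


asyncio.run(main())
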
        return SlidingSyncResult.Extensions(
@@ -197,6 +232,7 @@ class SlidingSyncExtensionHandler:
            receipts=receipts_response,
            typing=typing_response,
            thread_subscriptions=thread_subs_response,
            threads=threads_response,
        )

    def find_relevant_room_ids_for_extension(
@@ -967,3 +1003,273 @@ class SlidingSyncExtensionHandler:
            unsubscribed=unsubscribed_threads,
            prev_batch=prev_batch,
        )

    def _extract_thread_id_from_event(self, event: EventBase) -> str | None:
        """Extract thread ID from event if it's a thread reply.

        Args:
            event: The event to check.

        Returns:
            The thread ID if the event is a thread reply, None otherwise.
        """
        relates_to = event.content.get(EventContentFields.RELATIONS)
        if isinstance(relates_to, dict):
            if relates_to.get(MRelatesToFields.REL_TYPE) == RelationTypes.THREAD:
                return relates_to.get(MRelatesToFields.EVENT_ID)
        return None

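What `_extract_thread_id_from_event` looks for, shown on a bare event-content dict (the values here are illustrative). A thread reply carries an `m.relates_to` block whose `rel_type` is `m.thread` and whose `event_id` points at the thread root.

thread_reply_content = {
    "body": "a reply inside the thread",
    "m.relates_to": {
        "rel_type": "m.thread",
        "event_id": "$thread_root_event_id",
    },
}

relates_to = thread_reply_content.get("m.relates_to")
thread_id = (
    relates_to.get("event_id")
    if isinstance(relates_to, dict) and relates_to.get("rel_type") == "m.thread"
    else None
)
assert thread_id == "$thread_root_event_id"
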
|
||||
def _find_threads_in_timeline(
|
||||
self,
|
||||
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
|
||||
) -> set[str]:
|
||||
"""Find all thread IDs that have events in room timelines.
|
||||
|
||||
Args:
|
||||
actual_room_response_map: A map of room ID to room results.
|
||||
|
||||
Returns:
|
||||
A set of thread IDs (thread root event IDs) that appear in the timeline.
|
||||
"""
|
||||
threads_in_timeline: set[str] = set()
|
||||
for room_result in actual_room_response_map.values():
|
||||
if room_result.timeline_events:
|
||||
for event in room_result.timeline_events:
|
||||
thread_id = self._extract_thread_id_from_event(event)
|
||||
if thread_id:
|
||||
threads_in_timeline.add(thread_id)
|
||||
return threads_in_timeline

    def _merge_prev_batch_token(
        self,
        current_token: StreamToken | None,
        new_token: StreamToken | None,
    ) -> StreamToken | None:
        """Merge two prev_batch tokens, taking the maximum (latest) for backwards pagination.

        Args:
            current_token: The current prev_batch token (may be None)
            new_token: The new prev_batch token to merge (may be None)

        Returns:
            The merged token (maximum of the two, or None if both are None)
        """
        if new_token is None:
            return current_token
        if current_token is None:
            return new_token
        if new_token.room_key.stream > current_token.room_key.stream:
            return new_token
        return current_token
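
Taking the maximum is the right merge for backwards pagination: /relations with dir=b walks from newest to oldest, so the correct resume point across several per-room queries is the latest position seen. A sketch with plain ints standing in for RoomStreamToken.stream:

def merge_prev_batch(current: int | None, new: int | None) -> int | None:
    if new is None:
        return current
    if current is None:
        return new
    return new if new > current else current

assert merge_prev_batch(None, None) is None
assert merge_prev_batch(None, 42) == 42
assert merge_prev_batch(42, 17) == 42  # keep the later (maximum) position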

    def _merge_thread_updates(
        self,
        target: dict[str, list[ThreadUpdateInfo]],
        source: dict[str, list[ThreadUpdateInfo]],
    ) -> None:
        """Merge thread updates from source into target.

        Args:
            target: The target dict to merge into (modified in place)
            source: The source dict to merge from
        """
        for thread_id, updates in source.items():
            target.setdefault(thread_id, []).extend(updates)

    async def get_threads_extension_response(
        self,
        sync_config: SlidingSyncConfig,
        threads_request: SlidingSyncConfig.Extensions.ThreadsExtension,
        actual_room_ids: set[str],
        actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
        room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType],
        to_token: StreamToken,
        from_token: SlidingSyncStreamToken | None,
    ) -> SlidingSyncResult.Extensions.ThreadsExtension | None:
        """Handle the Threads extension (MSC4360).

        Args:
            sync_config: Sync configuration.
            threads_request: The threads extension from the request.
            actual_room_ids: The actual room IDs in the Sliding Sync response.
            actual_room_response_map: A map of room ID to room results in the
                sliding sync response. Used to determine which threads already have
                events in the room timeline.
            room_membership_for_user_at_to_token_map: A map of room ID to the membership
                information for the user in the room at the time of `to_token`.
            to_token: The point in the stream to sync up to.
            from_token: The point in the stream to sync from.

        Returns:
            The response (None if empty or the threads extension is disabled)
        """
        if not threads_request.enabled:
            return None

        # Identify which threads already have events in the room timelines.
        # If include_roots=False, we'll exclude these threads from the DB query
        # since the client already sees the thread activity in the timeline.
        # If include_roots=True, we fetch all threads regardless, because the client
        # wants the thread root events.
        threads_to_exclude: set[str] | None = None
        if not threads_request.include_roots:
            threads_to_exclude = self._find_threads_in_timeline(
                actual_room_response_map
            )

        # Separate rooms into groups based on membership status.
        # For LEAVE/BAN rooms, we need to bound the to_token to prevent leaking events
        # that occurred after the user left/was banned.
        leave_ban_rooms: set[str] = set()
        other_rooms: set[str] = set()

        for room_id in actual_room_ids:
            membership_info = room_membership_for_user_at_to_token_map.get(room_id)
            if membership_info and membership_info.membership in (
                Membership.LEAVE,
                Membership.BAN,
            ):
                leave_ban_rooms.add(room_id)
            else:
                other_rooms.add(room_id)

        # Fetch thread updates, handling LEAVE/BAN rooms separately to avoid data leaks.
        all_thread_updates: dict[str, list[ThreadUpdateInfo]] = {}
        prev_batch_token: StreamToken | None = None
        remaining_limit = threads_request.limit

        # Query for rooms where the user has left or been banned, using their leave/ban
        # event position as the upper bound to prevent seeing events after they left.
        if leave_ban_rooms:
            for room_id in leave_ban_rooms:
                if remaining_limit <= 0:
                    # We've already fetched enough updates, but we still need to set
                    # prev_batch to indicate there are more results.
                    prev_batch_token = to_token
                    break

                membership_info = room_membership_for_user_at_to_token_map[room_id]
                bounded_to_token = membership_info.event_pos.to_room_stream_token()

                (
                    room_thread_updates,
                    room_prev_batch,
                ) = await self.store.get_thread_updates_for_rooms(
                    room_ids={room_id},
                    from_token=from_token.stream_token.room_key if from_token else None,
                    to_token=bounded_to_token,
                    limit=remaining_limit,
                    exclude_thread_ids=threads_to_exclude,
                )

                # Count how many updates we fetched and reduce the remaining limit
                num_updates = sum(
                    len(updates) for updates in room_thread_updates.values()
                )
                remaining_limit -= num_updates

                self._merge_thread_updates(all_thread_updates, room_thread_updates)
                prev_batch_token = self._merge_prev_batch_token(
                    prev_batch_token, room_prev_batch
                )

        # Query for rooms where the user is joined, invited, or knocking, using the
        # normal to_token as the upper bound.
        if other_rooms and remaining_limit > 0:
            (
                other_thread_updates,
                other_prev_batch,
            ) = await self.store.get_thread_updates_for_rooms(
                room_ids=other_rooms,
                from_token=from_token.stream_token.room_key if from_token else None,
                to_token=to_token.room_key,
                limit=remaining_limit,
                exclude_thread_ids=threads_to_exclude,
            )

            self._merge_thread_updates(all_thread_updates, other_thread_updates)
            prev_batch_token = self._merge_prev_batch_token(
                prev_batch_token, other_prev_batch
            )

        if len(all_thread_updates) == 0:
            return None
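
The leave/ban handling above is the key privacy property of this query: the upper bound becomes the user's own leave/ban position rather than to_token. A plain-int sketch of why that excludes later events (the stream positions are hypothetical):

# Thread events keyed by stream position in one room (hypothetical data).
thread_events = {10: "$a", 20: "$b", 30: "$c"}
leave_position = 20      # where the user's leave/ban event sits
to_token_position = 35   # the sync's normal upper bound

def fetch(upper: int) -> list[str]:
    return [ev for pos, ev in sorted(thread_events.items()) if pos <= upper]

assert fetch(to_token_position) == ["$a", "$b", "$c"]
assert fetch(leave_position) == ["$a", "$b"]  # "$c" happened after the leave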

        # Build a mapping of event_id -> (thread_id, update) for efficient lookup
        # during visibility filtering.
        event_to_thread_map: dict[str, tuple[str, ThreadUpdateInfo]] = {}
        for thread_id, updates in all_thread_updates.items():
            for update in updates:
                event_to_thread_map[update.event_id] = (thread_id, update)

        # Fetch and filter events for visibility
        all_events = await self.store.get_events_as_list(event_to_thread_map.keys())
        filtered_events = await filter_events_for_client(
            self._storage_controllers, sync_config.user.to_string(), all_events
        )

        # Rebuild thread updates from filtered events
        filtered_updates: dict[str, list[ThreadUpdateInfo]] = defaultdict(list)
        for event in filtered_events:
            if event.event_id in event_to_thread_map:
                thread_id, update = event_to_thread_map[event.event_id]
                filtered_updates[thread_id].append(update)

        if not filtered_updates:
            return None

        # Note: Updates are already sorted by stream_ordering DESC from the database query,
        # and filter_events_for_client preserves order, so updates[0] is guaranteed to be
        # the latest event for each thread.

        # Optionally fetch thread root events and their bundled aggregations
        thread_root_event_map = {}
        aggregations_map = {}
        if threads_request.include_roots:
            thread_root_events = await self.store.get_events_as_list(
                filtered_updates.keys()
            )
            thread_root_event_map = {e.event_id: e for e in thread_root_events}

            if thread_root_event_map:
                aggregations_map = (
                    await self.relations_handler.get_bundled_aggregations(
                        thread_root_event_map.values(),
                        sync_config.user.to_string(),
                    )
                )

        thread_updates: dict[str, dict[str, _ThreadUpdate]] = {}
        for thread_root, updates in filtered_updates.items():
            # We only care about the latest update for the thread. As noted above,
            # updates are already sorted by stream_ordering DESC, so updates[0] is
            # guaranteed to be the latest (highest stream_ordering).
            latest_update = updates[0]

            # Generate a per-thread prev_batch token if this thread has multiple visible
            # updates. When we hit the global limit, we generate prev_batch tokens for
            # all threads, even if we only saw 1 update for them. This is to cover the
            # case where we only saw a single update for a given thread, but the global
            # limit prevents us from obtaining other updates which would have otherwise
            # been included in the range.
            per_thread_prev_batch = None
            if len(updates) > 1 or prev_batch_token is not None:
                # Create a token pointing to one position before the latest event's
                # stream position. This makes it exclusive - /relations with dir=b
                # won't return the latest event again. Use StreamToken.START as base
                # (all other streams at 0) since only the room position matters.
                per_thread_prev_batch = StreamToken.START.copy_and_replace(
                    StreamKeyType.ROOM,
                    RoomStreamToken(stream=latest_update.stream_ordering - 1),
                )

            thread_updates.setdefault(latest_update.room_id, {})[thread_root] = (
                _ThreadUpdate(
                    thread_root=thread_root_event_map.get(thread_root),
                    prev_batch=per_thread_prev_batch,
                    bundled_aggregations=aggregations_map.get(thread_root),
                )
            )

        return SlidingSyncResult.Extensions.ThreadsExtension(
            updates=thread_updates,
            prev_batch=prev_batch_token,
        )
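
The stream_ordering - 1 arithmetic above is what makes each per-thread token exclusive: backwards pagination returns events at or below the token, so the latest event (already delivered in this response) is not sent twice. A quick sketch (positions hypothetical):

updates = [30, 20, 10]   # stream orderings of one thread's updates, sorted DESC
latest = updates[0]      # 30 is delivered in this sync response
prev_batch = latest - 1  # token at 29

# Backwards pagination from prev_batch only sees strictly older events.
older = [pos for pos in updates if pos <= prev_batch]
assert older == [20, 10]  # 30 is excluded, so there is no duplicate delivery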

@@ -34,12 +34,10 @@ from synapse.api.constants import (
    EventTypes,
    Membership,
)
from synapse.api.errors import SlidingSyncUnknownPosition
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import StrippedStateEvent
from synapse.events.utils import parse_stripped_state_event
from synapse.logging.opentracing import start_active_span, trace
from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS
from synapse.storage.databases.main.state import (
    ROOM_UNKNOWN_SENTINEL,
    Sentinel as StateSentinel,
@@ -70,7 +68,6 @@ from synapse.types.handlers.sliding_sync import (
)
from synapse.types.state import StateFilter
from synapse.util import MutableOverlayMapping
from synapse.util.duration import Duration
from synapse.util.sentinel import Sentinel

if TYPE_CHECKING:
@@ -80,27 +77,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


# Minimum time since the last sync before we consider expiring the connection
# due to too many rooms to send. This stops us from getting into tight loops
# with clients that request lots of data at once.
#
# c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrarily picked.
MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1)

# How many rooms with updates we allow before we consider the connection expired
# due to too many rooms to send.
#
# c.f. `MINIMUM_NOT_USED_AGE_EXPIRY`. These values are somewhat arbitrarily
# picked.
NUM_ROOMS_THRESHOLD = 100

# Sanity check that our minimum age is sensible compared to the update interval,
# i.e. if `MINIMUM_NOT_USED_AGE_EXPIRY` is too small then we might expire the
# connection even if it is actively being used (and we're just not updating the
# DB frequently enough). We arbitrarily double the update interval to give some
# wiggle room.
assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY

# Helper definition for the types that we might return. We do this to avoid
# copying data between types (which can be expensive for many rooms).
RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync
@@ -200,7 +176,6 @@ class SlidingSyncRoomLists:
        self.storage_controllers = hs.get_storage_controllers()
        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
        self.is_mine_id = hs.is_mine_id
        self._clock = hs.get_clock()

    async def compute_interested_rooms(
        self,
@@ -882,41 +857,11 @@ class SlidingSyncRoomLists:

                # We only need to check for new events since any state changes
                # will also come down as new events.

                rooms_that_have_updates = await (
                    self.store.get_rooms_that_have_updates_since_sliding_sync_table(
                rooms_that_have_updates = (
                    self.store.get_rooms_that_might_have_updates(
                        relevant_room_map.keys(), from_token.room_key
                    )
                )

                # Check if we have lots of updates to send; if so then it's
                # better for us to tell the client to do a full resync
                # instead (to try and avoid long SSS response times when
                # there is new data).
                #
                # Due to the construction of the SSS API, the client is in
                # charge of setting the range of rooms to request updates
                # for. Generally, it will start with a small range and then
                # expand (and occasionally it may contract the range again
                # if it's been offline for a while). If we know there are a
                # lot of updates, it's better to reset the connection and
                # wait for the client to start again (with a much smaller
                # range) than to try and send down a large number of updates
                # (which can take a long time).
                #
                # We only do this if the last sync was over
                # `MINIMUM_NOT_USED_AGE_EXPIRY` ago, to ensure we don't get
                # into tight loops with clients that keep requesting large
                # sliding sync windows.
                if len(rooms_that_have_updates) > NUM_ROOMS_THRESHOLD:
                    last_sync_ts = previous_connection_state.last_used_ts
                    if (
                        last_sync_ts is not None
                        and (self._clock.time_msec() - last_sync_ts)
                        > MINIMUM_NOT_USED_AGE_EXPIRY.as_millis()
                    ):
                        raise SlidingSyncUnknownPosition()

                rooms_should_send.update(rooms_that_have_updates)
                relevant_rooms_to_send_map = {
                    room_id: room_sync_config
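
The expiry decision in this hunk combines both module constants: a connection is only reset when it is both busy (more than NUM_ROOMS_THRESHOLD rooms with updates) and stale (last used longer ago than MINIMUM_NOT_USED_AGE_EXPIRY). A condensed sketch of that decision, with the duration expressed as plain milliseconds:

NUM_ROOMS_THRESHOLD = 100
MINIMUM_NOT_USED_AGE_EXPIRY_MS = 60 * 60 * 1000  # Duration(hours=1), as millis

def should_expire(num_rooms_with_updates: int, now_ms: int, last_sync_ms: int | None) -> bool:
    if num_rooms_with_updates <= NUM_ROOMS_THRESHOLD:
        return False
    if last_sync_ms is None:
        return False
    return (now_ms - last_sync_ms) > MINIMUM_NOT_USED_AGE_EXPIRY_MS

assert not should_expire(500, now_ms=10_000, last_sync_ms=9_000)  # active client
assert should_expire(500, now_ms=7_200_001, last_sync_ms=0)       # stale and busy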

@@ -75,7 +75,7 @@ class SlidingSyncConnectionStore:
        """
        # If this is our first request, there is no previous connection state to fetch out of the database
        if from_token is None or from_token.connection_position == 0:
            return PerConnectionState(last_used_ts=None)
            return PerConnectionState()

        conn_id = sync_config.conn_id or ""


@@ -32,7 +32,6 @@ from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import JsonDict
from synapse.util.duration import Duration
from synapse.util.events import get_plain_text_topic_from_event_content

if TYPE_CHECKING:
@@ -73,7 +72,7 @@ class StatsHandler:
        # We kick this off so that we don't have to wait for a change before
        # we start populating stats
        self.clock.call_later(
            Duration(seconds=0),
            0,
            self.notify_new_event,
        )


@@ -36,6 +36,7 @@ from synapse.api.constants import (
    Direction,
    EventContentFields,
    EventTypes,
    JoinRules,
    Membership,
)
from synapse.api.filtering import FilterCollection
@@ -789,13 +790,22 @@ class SyncHandler:
                )
            )

            loaded_recents = await filter_events_for_client(
            filtered_recents = await filter_events_for_client(
                self._storage_controllers,
                sync_config.user.to_string(),
                loaded_recents,
                always_include_ids=current_state_ids,
            )

            loaded_recents = []
            for event in filtered_recents:
                if event.type == EventTypes.CallInvite:
                    room_info = await self.store.get_room_with_stats(event.room_id)
                    assert room_info is not None
                    if room_info.join_rules == JoinRules.PUBLIC:
                        continue
                loaded_recents.append(event)

            log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})

            loaded_recents.extend(recents)

@@ -41,7 +41,6 @@ from synapse.types import (
    UserID,
)
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.duration import Duration
from synapse.util.metrics import Measure
from synapse.util.retryutils import filter_destinations_by_retry_limiter
from synapse.util.wheel_timer import WheelTimer
@@ -61,15 +60,15 @@ class RoomMember:


# How often we expect remote servers to resend us presence.
FEDERATION_TIMEOUT = Duration(minutes=1)
FEDERATION_TIMEOUT = 60 * 1000

# How often to resend typing across federation.
FEDERATION_PING_INTERVAL = Duration(seconds=40)
FEDERATION_PING_INTERVAL = 40 * 1000


# How long to remember a typing notification happened in a room before
# forgetting about it.
FORGET_TIMEOUT = Duration(minutes=10)
FORGET_TIMEOUT = 10 * 60 * 1000
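
Each of these pairs is meant to encode the same quantity in two forms (a Duration object versus raw milliseconds). Assuming Duration's as_millis() returns plain integer milliseconds, as its uses elsewhere in this diff suggest, the equivalences can be checked directly:

from synapse.util.duration import Duration

assert Duration(minutes=1).as_millis() == 60 * 1000        # FEDERATION_TIMEOUT
assert Duration(seconds=40).as_millis() == 40 * 1000       # FEDERATION_PING_INTERVAL
assert Duration(minutes=10).as_millis() == 10 * 60 * 1000  # FORGET_TIMEOUT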


class FollowerTypingHandler:
@@ -107,7 +106,7 @@ class FollowerTypingHandler:

        self._rooms_updated: set[str] = set()

        self.clock.looping_call(self._handle_timeouts, Duration(seconds=5))
        self.clock.looping_call(self._handle_timeouts, 5000)
        self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT)

    def _reset(self) -> None:
@@ -142,10 +141,7 @@ class FollowerTypingHandler:
        # user.
        if self.federation and self.is_mine_id(member.user_id):
            last_fed_poke = self._member_last_federation_poke.get(member, None)
            if (
                not last_fed_poke
                or last_fed_poke + FEDERATION_PING_INTERVAL.as_millis() <= now
            ):
            if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now:
                self.hs.run_as_background_process(
                    "typing._push_remote",
                    self._push_remote,
@@ -169,7 +165,7 @@ class FollowerTypingHandler:

        now = self.clock.time_msec()
        self.wheel_timer.insert(
            now=now, obj=member, then=now + FEDERATION_PING_INTERVAL.as_millis()
            now=now, obj=member, then=now + FEDERATION_PING_INTERVAL
        )

        hosts: StrCollection = (
@@ -319,7 +315,7 @@ class TypingWriterHandler(FollowerTypingHandler):

        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        await self.auth.check_user_in_room(room_id, requester)
@@ -354,7 +350,7 @@ class TypingWriterHandler(FollowerTypingHandler):

        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
            await self.clock.sleep(random.randint(1, 10))
            raise ShadowBanError()

        await self.auth.check_user_in_room(room_id, requester)
@@ -432,10 +428,8 @@ class TypingWriterHandler(FollowerTypingHandler):
            if user.domain in domains:
                logger.info("Got typing update from %s: %r", user_id, content)
                now = self.clock.time_msec()
                self._member_typing_until[member] = now + FEDERATION_TIMEOUT.as_millis()
                self.wheel_timer.insert(
                    now=now, obj=member, then=now + FEDERATION_TIMEOUT.as_millis()
                )
                self._member_typing_until[member] = now + FEDERATION_TIMEOUT
                self.wheel_timer.insert(now=now, obj=member, then=now + FEDERATION_TIMEOUT)
                self._push_update_local(member=member, typing=content["typing"])

    def _push_update_local(self, member: RoomMember, typing: bool) -> None:

@@ -40,7 +40,6 @@ from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.databases.main.user_directory import SearchResult
from synapse.storage.roommember import ProfileInfo
from synapse.types import UserID
from synapse.util.duration import Duration
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import non_null_str_or_none
@@ -53,7 +52,7 @@ logger = logging.getLogger(__name__)
# Don't refresh a stale user directory entry, using a Federation /profile request,
# for 60 seconds. This gives time for other state events to arrive (which will
# then be coalesced such that only one /profile request is made).
USER_DIRECTORY_STALE_REFRESH_TIME = Duration(minutes=1)
USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000

# Maximum number of remote servers that we will attempt to refresh profiles for
# in one go.
@@ -61,7 +60,7 @@ MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5

# As long as we have servers to refresh (without backoff), keep adding more
# every 15 seconds.
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = Duration(seconds=15)
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15


def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
@@ -138,13 +137,13 @@ class UserDirectoryHandler(StateDeltasHandler):
        # We kick this off so that we don't have to wait for a change before
        # we start populating the user directory
        self.clock.call_later(
            Duration(seconds=0),
            0,
            self.notify_new_event,
        )

        # Kick off the profile refresh process on startup
        self._refresh_remote_profiles_call_later = self.clock.call_later(
            Duration(seconds=10),
            10,
            self.kick_off_remote_profile_refresh_process,
        )

@@ -551,7 +550,7 @@ class UserDirectoryHandler(StateDeltasHandler):
            now_ts = self.clock.time_msec()
            await self.store.set_remote_user_profile_in_user_dir_stale(
                user_id,
                next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME.as_millis(),
                next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS,
                retry_counter=0,
            )
            # Schedule a wake-up to refresh the user directory for this server.
@@ -559,13 +558,13 @@ class UserDirectoryHandler(StateDeltasHandler):
            # other servers ahead of it in the queue to get in the way of updating
            # the profile if the server only just sent us an event.
            self.clock.call_later(
                USER_DIRECTORY_STALE_REFRESH_TIME + Duration(seconds=1),
                USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
                self.kick_off_remote_profile_refresh_process_for_remote_server,
                UserID.from_string(user_id).domain,
            )
            # Schedule a wake-up to handle any backoffs that may occur in the future.
            self.clock.call_later(
                USER_DIRECTORY_STALE_REFRESH_TIME * 2 + Duration(seconds=1),
                2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
                self.kick_off_remote_profile_refresh_process,
            )
            return
@@ -657,9 +656,7 @@ class UserDirectoryHandler(StateDeltasHandler):
        if not users:
            return
        _, _, next_try_at_ts = users[0]
        delay = Duration(
            milliseconds=next_try_at_ts - self.clock.time_msec()
        ) + Duration(seconds=2)
        delay = ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2
        self._refresh_remote_profiles_call_later = self.clock.call_later(
            delay,
            self.kick_off_remote_profile_refresh_process,
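
The two delay computations in that last hunk express the same wake-up in different units: the Duration form keeps millisecond precision plus a 2-second margin, while the integer form floors to whole seconds first. A sketch of the arithmetic:

next_try_at_ms = 125_500
now_ms = 100_000

delay_ms = (next_try_at_ms - now_ms) + 2_000       # Duration style: 27_500 ms
delay_s = ((next_try_at_ms - now_ms) // 1000) + 2  # integer style: 27 s

assert delay_ms == 27_500
assert delay_s == 27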

@@ -39,7 +39,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
from synapse.storage.databases.main.lock import Lock, LockStore
from synapse.util.async_helpers import timeout_deferred
from synapse.util.clock import Clock
from synapse.util.duration import Duration
from synapse.util.constants import ONE_MINUTE_SECONDS

if TYPE_CHECKING:
    from synapse.logging.opentracing import opentracing
@@ -72,7 +72,7 @@ class WorkerLocksHandler:
        # that lock.
        self._locks: dict[tuple[str, str], WeakSet[WaitingLock | WaitingMultiLock]] = {}

        self._clock.looping_call(self._cleanup_locks, Duration(seconds=30))
        self._clock.looping_call(self._cleanup_locks, 30_000)

        self._notifier.add_lock_released_callback(self._on_lock_released)

@@ -184,10 +184,12 @@ class WorkerLocksHandler:
            locks: Collection[WaitingLock | WaitingMultiLock],
        ) -> None:
            for lock in locks:
                lock.release_lock()
                deferred = lock.deferred
                if not deferred.called:
                    deferred.callback(None)

        self._clock.call_later(
            Duration(seconds=0),
            0,
            _wake_all_locks,
            locks,
        )

@@ -213,12 +215,6 @@ class WaitingLock:
            lambda: start_active_span("WaitingLock.lock")
        )

    def release_lock(self) -> None:
        """Release the lock (by resolving the deferred)"""
        if not self.deferred.called:
            with PreserveLoggingContext():
                self.deferred.callback(None)

    async def __aenter__(self) -> None:
        self._lock_span.__enter__()

@@ -276,7 +272,7 @@ class WaitingLock:
    def _get_next_retry_interval(self) -> float:
        next = self._retry_interval
        self._retry_interval = max(5, next * 2)
        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 iterations
        if self._retry_interval > 10 * ONE_MINUTE_SECONDS:  # >7 iterations
            logger.warning(
                "Lock timeout is getting excessive: %ss. There may be a deadlock.",
                self._retry_interval,
@@ -302,12 +298,6 @@ class WaitingMultiLock:
            lambda: start_active_span("WaitingLock.lock")
        )

    def release_lock(self) -> None:
        """Release the lock (by resolving the deferred)"""
        if not self.deferred.called:
            with PreserveLoggingContext():
                self.deferred.callback(None)

    async def __aenter__(self) -> None:
        self._lock_span.__enter__()

@@ -363,7 +353,7 @@ class WaitingMultiLock:
    def _get_next_retry_interval(self) -> float:
        next = self._retry_interval
        self._retry_interval = max(5, next * 2)
        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 iterations
        if self._retry_interval > 10 * ONE_MINUTE_SECONDS:  # >7 iterations
            logger.warning(
                "Lock timeout is getting excessive: %ss. There may be a deadlock.",
                self._retry_interval,
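
The "# >7 iterations" note follows from the doubling in _get_next_retry_interval: starting from the 5-second floor imposed by max(5, ...), the interval first exceeds ten minutes (600 s) after seven doublings:

interval = 5.0  # seconds, the max(5, ...) floor
doublings = 0
while interval <= 600:  # 10 minutes, i.e. 10 * ONE_MINUTE_SECONDS
    interval *= 2
    doublings += 1
assert doublings == 7 and interval == 640.0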

@@ -77,17 +77,12 @@ from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_u
from synapse.http.proxyagent import ProxyAgent
from synapse.http.replicationagent import ReplicationAgent
from synapse.http.types import QueryParams
from synapse.logging.context import (
    PreserveLoggingContext,
    make_deferred_yieldable,
    run_in_background,
)
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import ISynapseReactor, StrSequence
from synapse.util.async_helpers import timeout_deferred
from synapse.util.clock import Clock
from synapse.util.duration import Duration
from synapse.util.json import json_decoder

if TYPE_CHECKING:
@@ -162,9 +157,7 @@ def _is_ip_blocked(
    return False


# The delay used by the scheduler to schedule tasks "as soon as possible", while
# still allowing other tasks to run between runs.
_EPSILON = Duration(microseconds=1)
_EPSILON = 0.00000001


def _make_scheduler(clock: Clock) -> Callable[[Callable[[], object]], IDelayedCall]:

@@ -1043,8 +1036,7 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
        Report a max size exceed error and disconnect the first time this is called.
        """
        if not self.deferred.called:
            with PreserveLoggingContext():
                self.deferred.errback(BodyExceededMaxSize())
            self.deferred.errback(BodyExceededMaxSize())
            # Close the connection (forcefully) since all the data will get
            # discarded anyway.
            assert self.transport is not None
@@ -1143,8 +1135,7 @@ class _MultipartParserProtocol(protocol.Protocol):
                    logger.warning(
                        "Exception encountered writing file data to stream: %s", e
                    )
                    with PreserveLoggingContext():
                        self.deferred.errback()
                    self.deferred.errback()
                self.file_length += end - start

        callbacks: "multipart.MultipartCallbacks" = {
@@ -1156,8 +1147,7 @@ class _MultipartParserProtocol(protocol.Protocol):

        self.total_length += len(incoming_data)
        if self.max_length is not None and self.total_length >= self.max_length:
            with PreserveLoggingContext():
                self.deferred.errback(BodyExceededMaxSize())
            self.deferred.errback(BodyExceededMaxSize())
            # Close the connection (forcefully) since all the data will get
            # discarded anyway.
            assert self.transport is not None
@@ -1167,8 +1157,7 @@ class _MultipartParserProtocol(protocol.Protocol):
            self.parser.write(incoming_data)
        except Exception as e:
            logger.warning("Exception writing to multipart parser: %s", e)
            with PreserveLoggingContext():
                self.deferred.errback()
            self.deferred.errback()
            return

    def connectionLost(self, reason: Failure = connectionDone) -> None:
@@ -1178,11 +1167,9 @@ class _MultipartParserProtocol(protocol.Protocol):

        if reason.check(ResponseDone):
            self.multipart_response.length = self.file_length
            with PreserveLoggingContext():
                self.deferred.callback(self.multipart_response)
            self.deferred.callback(self.multipart_response)
        else:
            with PreserveLoggingContext():
                self.deferred.errback(reason)
            self.deferred.errback(reason)


class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
@@ -1206,8 +1193,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
        try:
            self.stream.write(data)
        except Exception:
            with PreserveLoggingContext():
                self.deferred.errback()
            self.deferred.errback()
            return

        self.length += len(data)
@@ -1215,8 +1201,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
        # connection. dataReceived might be called again if data was received
        # in the meantime.
        if self.max_size is not None and self.length >= self.max_size:
            with PreserveLoggingContext():
                self.deferred.errback(BodyExceededMaxSize())
            self.deferred.errback(BodyExceededMaxSize())
            # Close the connection (forcefully) since all the data will get
            # discarded anyway.
            assert self.transport is not None
@@ -1228,8 +1213,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
            return

        if reason.check(ResponseDone):
            with PreserveLoggingContext():
                self.deferred.callback(self.length)
            self.deferred.callback(self.length)
        elif reason.check(PotentialDataLoss):
            # This applies to requests which don't set `Content-Length` or a
            # `Transfer-Encoding` in the response because in this case the end of the
@@ -1238,11 +1222,9 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
            # behavior is expected of some servers (like YouTube), let's ignore it.
            # Stolen from https://github.com/twisted/treq/pull/49/files
            # http://twistedmatrix.com/trac/ticket/4840
            with PreserveLoggingContext():
                self.deferred.callback(self.length)
            self.deferred.callback(self.length)
        else:
            with PreserveLoggingContext():
                self.deferred.errback(reason)
            self.deferred.errback(reason)
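
Every callback and errback in this file gains the same wrapper. Isolated, the pattern looks like the sketch below (PreserveLoggingContext is the real Synapse context manager used in this diff; fire_safely is a hypothetical helper name). The idea is to restore the sentinel logging context around the Deferred's callback chain instead of leaking the protocol's current context into it:

from twisted.internet.defer import Deferred
from synapse.logging.context import PreserveLoggingContext

def fire_safely(deferred: Deferred, result: object) -> None:
    # Fire the deferred under PreserveLoggingContext so the callbacks run
    # against the reactor's sentinel context, as the surrounding diff does.
    if not deferred.called:
        with PreserveLoggingContext():
            deferred.callback(result)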


def read_body_with_max_size(

Some files were not shown because too many files have changed in this diff.