Compare commits
31 Commits
release-v1
...
erikj/rust
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1780a70748 | ||
|
|
f3b7940e14 | ||
|
|
53e83c76b2 | ||
|
|
3d9f82efcb | ||
|
|
c85c5ace52 | ||
|
|
f2d2481e56 | ||
|
|
69fa29700e | ||
|
|
5261d2e2e8 | ||
|
|
f799eac7ea | ||
|
|
906cead9ca | ||
|
|
89e8b98b65 | ||
|
|
8ef0c8ff14 | ||
|
|
cf11919ddd | ||
|
|
526f84bc2e | ||
|
|
1cc729c177 | ||
|
|
b7e4bfd005 | ||
|
|
d4d3249ded | ||
|
|
8d7fcf9b76 | ||
|
|
dc0e896b68 | ||
|
|
c46fecd1f2 | ||
|
|
77f3986451 | ||
|
|
b58386e37e | ||
|
|
d3d9ca156e | ||
|
|
c2fe48a6ff | ||
|
|
bb5b47b62a | ||
|
|
26bc26586b | ||
|
|
c9b7e97355 | ||
|
|
3d20115115 | ||
|
|
a4ecb8e353 | ||
|
|
b5effc7201 | ||
|
|
b455c2a5ec |
@@ -5,18 +5,8 @@
|
||||
# - creates a venv with these old versions using poetry; and finally
|
||||
# - invokes `trial` to run the tests with old deps.
|
||||
|
||||
# Prevent tzdata from asking for user input
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
set -ex
|
||||
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
python3 python3-dev python3-pip python3-venv pipx \
|
||||
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
|
||||
|
||||
export LANG="C.UTF-8"
|
||||
|
||||
# Prevent virtualenv from auto-updating pip to an incompatible version
|
||||
export VIRTUALENV_NO_DOWNLOAD=1
|
||||
|
||||
@@ -33,12 +23,6 @@ export VIRTUALENV_NO_DOWNLOAD=1
|
||||
# a `cryptography` compiled against OpenSSL 1.1.
|
||||
# - Omit systemd: we're not logging to journal here.
|
||||
|
||||
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
|
||||
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
|
||||
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
|
||||
# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
|
||||
# runs on 3.8 (#12343).
|
||||
|
||||
sed -i \
|
||||
-e "s/[~>]=/==/g" \
|
||||
-e '/^python = "^/!s/\^/==/g' \
|
||||
@@ -55,7 +39,7 @@ sed -i \
|
||||
# toml file. This means we don't have to ensure compatibility between old deps and
|
||||
# dev tools.
|
||||
|
||||
pip install --user toml
|
||||
pip install toml wheel
|
||||
|
||||
REMOVE_DEV_DEPENDENCIES="
|
||||
import toml
|
||||
@@ -69,8 +53,8 @@ with open('pyproject.toml', 'w') as f:
|
||||
"
|
||||
python3 -c "$REMOVE_DEV_DEPENDENCIES"
|
||||
|
||||
pipx install poetry==1.1.14
|
||||
~/.local/bin/poetry lock
|
||||
pip install poetry==1.2.0
|
||||
poetry lock
|
||||
|
||||
echo "::group::Patched pyproject.toml"
|
||||
cat pyproject.toml
|
||||
@@ -78,6 +62,3 @@ echo "::endgroup::"
|
||||
echo "::group::Lockfile after patch"
|
||||
cat poetry.lock
|
||||
echo "::endgroup::"
|
||||
|
||||
~/.local/bin/poetry install -E "all test"
|
||||
~/.local/bin/poetry run trial --jobs=2 tests
|
||||
@@ -4,8 +4,12 @@
|
||||
# things to include
|
||||
!docker
|
||||
!synapse
|
||||
!rust
|
||||
!README.rst
|
||||
!pyproject.toml
|
||||
!poetry.lock
|
||||
!build_rust.py
|
||||
|
||||
rust/target
|
||||
|
||||
**/__pycache__
|
||||
|
||||
34
.github/workflows/latest_deps.yml
vendored
34
.github/workflows/latest_deps.yml
vendored
@@ -5,7 +5,7 @@
|
||||
#
|
||||
# As an overview this workflow:
|
||||
# - checks out develop,
|
||||
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
|
||||
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
|
||||
# - runs mypy and test suites in that checkout.
|
||||
#
|
||||
# Based on the twisted trunk CI job.
|
||||
@@ -26,12 +26,19 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
# The dev dependencies aren't exposed in the wheel metadata (at least with current
|
||||
# poetry-core versions), so we install with poetry.
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: "3.x"
|
||||
poetry-version: "1.2.0b1"
|
||||
poetry-version: "1.2.0"
|
||||
extras: "all"
|
||||
# Dump installed versions for debugging.
|
||||
- run: poetry run pip list > before.txt
|
||||
@@ -53,6 +60,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
|
||||
if: ${{ matrix.postgres-version }}
|
||||
@@ -69,6 +84,12 @@ jobs:
|
||||
if: ${{ matrix.postgres-version }}
|
||||
timeout-minutes: 2
|
||||
run: until pg_isready -h localhost; do sleep 1; done
|
||||
|
||||
# We nuke the local copy, as we've installed synapse into the virtualenv
|
||||
# (rather than use an editable install, which we no longer support). If we
|
||||
# don't do this then python can't find the native lib.
|
||||
- run: rm -rf synapse/
|
||||
|
||||
- run: python -m twisted.trial --jobs=2 tests
|
||||
env:
|
||||
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
|
||||
@@ -113,6 +134,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Ensure sytest runs `pip install`
|
||||
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
|
||||
run: rm /src/poetry.lock
|
||||
@@ -187,4 +216,3 @@ jobs:
|
||||
with:
|
||||
update_existing: true
|
||||
filename: .ci/latest_deps_build_failed_issue_template.md
|
||||
|
||||
|
||||
65
.github/workflows/release-artifacts.yml
vendored
65
.github/workflows/release-artifacts.yml
vendored
@@ -15,7 +15,7 @@ on:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
@@ -89,9 +89,67 @@ jobs:
|
||||
name: debs
|
||||
path: debs/*
|
||||
|
||||
build-wheels:
|
||||
name: Build wheels on ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-20.04, macos-10.15]
|
||||
is_pr:
|
||||
- ${{ startsWith(github.ref, 'refs/pull/') }}
|
||||
|
||||
exclude:
|
||||
# Don't build macos wheels on PR CI.
|
||||
- is_pr: true
|
||||
os: "macos-10.15"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-python@v3
|
||||
|
||||
- name: Install cibuildwheel
|
||||
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
|
||||
|
||||
# Only build a single wheel in CI.
|
||||
- name: Set env vars.
|
||||
run: |
|
||||
echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
|
||||
if: startsWith(github.ref, 'refs/pull/')
|
||||
|
||||
- name: Build wheels
|
||||
run: python -m cibuildwheel --output-dir wheelhouse
|
||||
env:
|
||||
# Skip testing for platforms which various libraries don't have wheels
|
||||
# for, and so need extra build deps.
|
||||
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: Wheel
|
||||
path: ./wheelhouse/*.whl
|
||||
|
||||
build-sdist:
|
||||
name: "Build pypi distribution files"
|
||||
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
|
||||
name: Build sdist
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- run: pip install build
|
||||
|
||||
- name: Build sdist
|
||||
run: python -m build --sdist
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: Sdist
|
||||
path: dist/*.tar.gz
|
||||
|
||||
|
||||
# if it's a tag, create a release and attach the artifacts to it
|
||||
attach-assets:
|
||||
@@ -99,6 +157,7 @@ jobs:
|
||||
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
|
||||
needs:
|
||||
- build-debs
|
||||
- build-wheels
|
||||
- build-sdist
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
150
.github/workflows/tests.yml
vendored
150
.github/workflows/tests.yml
vendored
@@ -10,6 +10,23 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
|
||||
# don't modify Rust code.
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
|
||||
steps:
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
# We only check on PRs
|
||||
if: startsWith(github.ref, 'refs/pull/')
|
||||
with:
|
||||
filters: |
|
||||
rust:
|
||||
- 'rust/**'
|
||||
- 'Cargo.toml'
|
||||
|
||||
check-sampleconfig:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -65,10 +82,54 @@ jobs:
|
||||
extras: "all"
|
||||
- run: poetry run scripts-dev/check_pydantic_models.py
|
||||
|
||||
lint-clippy:
|
||||
runs-on: ubuntu-latest
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
components: clippy
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- run: cargo clippy
|
||||
|
||||
lint-rustfmt:
|
||||
runs-on: ubuntu-latest
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
components: rustfmt
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- run: cargo fmt --check
|
||||
|
||||
# Dummy step to gate other tests on without repeating the whole list
|
||||
linting-done:
|
||||
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
|
||||
needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta]
|
||||
needs:
|
||||
- lint
|
||||
- lint-crlf
|
||||
- lint-newsfile
|
||||
- lint-pydantic
|
||||
- check-sampleconfig
|
||||
- check-schema-delta
|
||||
- lint-clippy
|
||||
- lint-rustfmt
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: "true"
|
||||
@@ -135,16 +196,54 @@ jobs:
|
||||
# Note: sqlite only; no postgres
|
||||
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
|
||||
needs: linting-done
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Test with old deps
|
||||
uses: docker://ubuntu:focal # For old python and sqlite
|
||||
# Note: focal seems to be using 3.8, but the oldest is 3.7?
|
||||
# See https://github.com/matrix-org/synapse/issues/12343
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
workdir: /github/workspace
|
||||
entrypoint: .ci/scripts/test_old_deps.sh
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
# There aren't wheels for some of the older deps, so we need to install
|
||||
# their build dependencies
|
||||
- run: |
|
||||
sudo apt-get -qq install build-essential libffi-dev python-dev \
|
||||
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
|
||||
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.7'
|
||||
|
||||
# Calculating the old-deps actually takes a bunch of time, so we cache the
|
||||
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
|
||||
# otherwise the `poetry install` step will error due to the poetry.lock
|
||||
# file being outdated.
|
||||
#
|
||||
# This caches the output of `Prepare old deps`, which should generate the
|
||||
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
|
||||
- uses: actions/cache@v3
|
||||
id: cache-poetry-old-deps
|
||||
name: Cache poetry.lock
|
||||
with:
|
||||
path: |
|
||||
poetry.lock
|
||||
pyproject.toml
|
||||
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
|
||||
- name: Prepare old deps
|
||||
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
|
||||
run: .ci/scripts/prepare_old_deps.sh
|
||||
|
||||
# We only now install poetry so that `setup-python-poetry` caches the
|
||||
# right poetry.lock's dependencies.
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
extras: "all test"
|
||||
|
||||
- run: poetry run trial -j2 tests
|
||||
- name: Dump logs
|
||||
# Logs are most useful when the command fails, always include them.
|
||||
if: ${{ always() }}
|
||||
@@ -216,6 +315,14 @@ jobs:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Prepare test blacklist
|
||||
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Run SyTest
|
||||
run: /bootstrap.sh synapse
|
||||
working-directory: /src
|
||||
@@ -322,6 +429,13 @@ jobs:
|
||||
with:
|
||||
path: synapse
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
@@ -331,6 +445,25 @@ jobs:
|
||||
shell: bash
|
||||
name: Run Complement Tests
|
||||
|
||||
cargo-test:
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- linting-done
|
||||
- changes
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.61.0
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- run: cargo test
|
||||
|
||||
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
|
||||
tests-done:
|
||||
if: ${{ always() }}
|
||||
@@ -345,6 +478,7 @@ jobs:
|
||||
- export-data
|
||||
- portdb
|
||||
- complement
|
||||
- cargo-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: matrix-org/done-action@v2
|
||||
|
||||
24
.github/workflows/twisted_trunk.yml
vendored
24
.github/workflows/twisted_trunk.yml
vendored
@@ -16,6 +16,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: "3.x"
|
||||
@@ -34,6 +42,14 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: "3.x"
|
||||
@@ -66,6 +82,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Patch dependencies
|
||||
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
|
||||
# but the sytest-synapse container expects it to be in /venv/.
|
||||
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -60,3 +60,10 @@ book/
|
||||
# complement
|
||||
/complement-*
|
||||
/master.tar.gz
|
||||
|
||||
# rust
|
||||
/target/
|
||||
/synapse/*.so
|
||||
|
||||
# Poetry will create a setup.py, which we don't want to include.
|
||||
/setup.py
|
||||
|
||||
9
Cargo.toml
Normal file
9
Cargo.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
# We make the whole Synapse folder a workspace so that we can run `cargo`
|
||||
# commands from the root (rather than having to cd into rust/).
|
||||
|
||||
[workspace]
|
||||
members = ["rust"]
|
||||
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
20
build_rust.py
Normal file
20
build_rust.py
Normal file
@@ -0,0 +1,20 @@
|
||||
# A build script for poetry that adds the rust extension.
|
||||
|
||||
import os
|
||||
from typing import Any, Dict
|
||||
|
||||
from setuptools_rust import Binding, RustExtension
|
||||
|
||||
|
||||
def build(setup_kwargs: Dict[str, Any]) -> None:
|
||||
original_project_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
|
||||
|
||||
extension = RustExtension(
|
||||
target="synapse.synapse_rust",
|
||||
path=cargo_toml_path,
|
||||
binding=Binding.PyO3,
|
||||
py_limited_api=True,
|
||||
)
|
||||
setup_kwargs.setdefault("rust_extensions", []).append(extension)
|
||||
setup_kwargs["zip_safe"] = False
|
||||
1
changelog.d/12595.misc
Normal file
1
changelog.d/12595.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add a stub Rust crate.
|
||||
1
changelog.d/13506.bugfix
Normal file
1
changelog.d/13506.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).
|
||||
1
changelog.d/13672.feature
Normal file
1
changelog.d/13672.feature
Normal file
@@ -0,0 +1 @@
|
||||
Add admin APIs to fetch messages within a particular window of time.
|
||||
1
changelog.d/13680.feature
Normal file
1
changelog.d/13680.feature
Normal file
@@ -0,0 +1 @@
|
||||
Cancel the processing of key query requests when they time out.
|
||||
1
changelog.d/13687.feature
Normal file
1
changelog.d/13687.feature
Normal file
@@ -0,0 +1 @@
|
||||
Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).
|
||||
1
changelog.d/13706.misc
Normal file
1
changelog.d/13706.misc
Normal file
@@ -0,0 +1 @@
|
||||
Rename the `EventFormatVersions` enum values so that they line up with room version numbers.
|
||||
1
changelog.d/13707.misc
Normal file
1
changelog.d/13707.misc
Normal file
@@ -0,0 +1 @@
|
||||
Update trial old deps CI to use poetry 1.2.0.
|
||||
1
changelog.d/13714.misc
Normal file
1
changelog.d/13714.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add experimental configuration option to allow disabling legacy Prometheus metric names.
|
||||
1
changelog.d/13717.misc
Normal file
1
changelog.d/13717.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add experimental configuration option to allow disabling legacy Prometheus metric names.
|
||||
1
changelog.d/13718.misc
Normal file
1
changelog.d/13718.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add experimental configuration option to allow disabling legacy Prometheus metric names.
|
||||
1
changelog.d/13724.misc
Normal file
1
changelog.d/13724.misc
Normal file
@@ -0,0 +1 @@
|
||||
Fix typechecking with latest types-jsonschema.
|
||||
1
changelog.d/13725.misc
Normal file
1
changelog.d/13725.misc
Normal file
@@ -0,0 +1 @@
|
||||
Update trial old deps CI to use poetry 1.2.0.
|
||||
1
changelog.d/13726.doc
Normal file
1
changelog.d/13726.doc
Normal file
@@ -0,0 +1 @@
|
||||
Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.
|
||||
1
changelog.d/13728.doc
Normal file
1
changelog.d/13728.doc
Normal file
@@ -0,0 +1 @@
|
||||
Define Synapse's compatability policy for SQLite versions.
|
||||
1
changelog.d/13730.misc
Normal file
1
changelog.d/13730.misc
Normal file
@@ -0,0 +1 @@
|
||||
Instrument `get_metadata_for_events` for understandable traces in Jaeger.
|
||||
1
changelog.d/13733.misc
Normal file
1
changelog.d/13733.misc
Normal file
@@ -0,0 +1 @@
|
||||
Convert `LruCache` linked lists into Rust.
|
||||
1
changelog.d/13734.misc
Normal file
1
changelog.d/13734.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add a stub Rust crate.
|
||||
1
changelog.d/13735.misc
Normal file
1
changelog.d/13735.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add a stub Rust crate.
|
||||
1
changelog.d/13738.bugfix
Normal file
1
changelog.d/13738.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Fix a bug where Synapse fails to start if a signing key file contains an empty line.
|
||||
1
changelog.d/13741.feature
Normal file
1
changelog.d/13741.feature
Normal file
@@ -0,0 +1 @@
|
||||
Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.
|
||||
1
changelog.d/13743.misc
Normal file
1
changelog.d/13743.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add a stub Rust crate.
|
||||
1
changelog.d/13746.bugfix
Normal file
1
changelog.d/13746.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Fix a long standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.
|
||||
1
changelog.d/13748.misc
Normal file
1
changelog.d/13748.misc
Normal file
@@ -0,0 +1 @@
|
||||
Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.
|
||||
1
changelog.d/13750.misc
Normal file
1
changelog.d/13750.misc
Normal file
@@ -0,0 +1 @@
|
||||
Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.
|
||||
1
changelog.d/13752.misc
Normal file
1
changelog.d/13752.misc
Normal file
@@ -0,0 +1 @@
|
||||
User an additional database query when persisting receipts.
|
||||
1
changelog.d/13754.misc
Normal file
1
changelog.d/13754.misc
Normal file
@@ -0,0 +1 @@
|
||||
Re-type hint some collections as read-only.
|
||||
1
changelog.d/13756.misc
Normal file
1
changelog.d/13756.misc
Normal file
@@ -0,0 +1 @@
|
||||
Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.
|
||||
1
changelog.d/13760.removal
Normal file
1
changelog.d/13760.removal
Normal file
@@ -0,0 +1 @@
|
||||
Synapse will now refuse to start if configured to use SQLite < 3.27.
|
||||
1
changelog.d/13763.misc
Normal file
1
changelog.d/13763.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add a stub Rust crate.
|
||||
@@ -335,7 +335,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "Events",
|
||||
@@ -1423,7 +1423,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
@@ -1804,7 +1804,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
|
||||
"expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -2437,7 +2437,7 @@
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds{instance=\"$instance\"}[1m]))",
|
||||
"expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
@@ -2451,7 +2451,7 @@
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds{instance=\"$instance\"}[1m]))",
|
||||
"expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
@@ -3425,7 +3425,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3518,7 +3518,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3726,7 +3726,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "successful txn rate",
|
||||
@@ -3736,7 +3736,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"legendFormat": "failed txn rate",
|
||||
"refId": "B"
|
||||
}
|
||||
@@ -3826,7 +3826,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "pdus",
|
||||
@@ -3836,7 +3836,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "edus",
|
||||
@@ -3928,7 +3928,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3939,7 +3939,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "edus",
|
||||
@@ -5042,7 +5042,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
|
||||
"expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -5054,7 +5054,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
|
||||
"expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "failed {{job}}",
|
||||
@@ -5268,12 +5268,12 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{index}}",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total",
|
||||
"refId": "A",
|
||||
"step": 2
|
||||
}
|
||||
@@ -5369,12 +5369,12 @@
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{index}}",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
|
||||
"refId": "A",
|
||||
"step": 2
|
||||
}
|
||||
@@ -5475,12 +5475,12 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "Hit Rate",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
|
||||
"refId": "A",
|
||||
"step": 2
|
||||
},
|
||||
@@ -5490,7 +5490,7 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -5598,12 +5598,12 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "Hit Rate",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
|
||||
"refId": "A",
|
||||
"step": 2
|
||||
},
|
||||
@@ -5613,7 +5613,7 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -5719,12 +5719,12 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "Hit Rate",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
|
||||
"metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
|
||||
"refId": "A",
|
||||
"step": 2
|
||||
},
|
||||
@@ -5734,7 +5734,7 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6087,7 +6087,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6187,7 +6187,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"interval": "",
|
||||
@@ -6287,7 +6287,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"interval": "",
|
||||
@@ -6538,7 +6538,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6636,7 +6636,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])",
|
||||
"expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6737,7 +6737,7 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"exemplar": true,
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6839,7 +6839,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -6936,7 +6936,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -7033,7 +7033,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -7122,7 +7122,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "{{job}}-{{index}} {{block_name}}",
|
||||
"refId": "A"
|
||||
@@ -7246,7 +7246,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{name}} {{job}}-{{index}}",
|
||||
@@ -7347,7 +7347,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
|
||||
"expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"interval": "",
|
||||
@@ -7447,7 +7447,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -7547,7 +7547,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
@@ -7643,7 +7643,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -7763,7 +7763,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
|
||||
"expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
|
||||
"interval": "",
|
||||
"legendFormat": "{{name}} {{job}}-{{index}}",
|
||||
"refId": "A"
|
||||
@@ -7853,7 +7853,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "{{name}} {{job}}-{{index}}",
|
||||
"refId": "A"
|
||||
@@ -9556,7 +9556,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
|
||||
"expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
|
||||
"format": "heatmap",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{le}}",
|
||||
@@ -9716,7 +9716,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
|
||||
"expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
|
||||
"format": "heatmap",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{le}}",
|
||||
@@ -9793,7 +9793,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "50%",
|
||||
@@ -9803,7 +9803,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "75%",
|
||||
@@ -9813,7 +9813,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "90%",
|
||||
@@ -9823,7 +9823,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "99%",
|
||||
@@ -9905,7 +9905,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
|
||||
"expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
|
||||
"format": "heatmap",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{le}}",
|
||||
@@ -9982,7 +9982,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "50%",
|
||||
@@ -9992,7 +9992,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "75%",
|
||||
@@ -10002,7 +10002,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "90%",
|
||||
@@ -10012,7 +10012,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "99%",
|
||||
@@ -10297,7 +10297,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "State res ",
|
||||
"refId": "A"
|
||||
@@ -10306,7 +10306,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "Potential to prune",
|
||||
"refId": "B"
|
||||
@@ -10315,7 +10315,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "Pruned",
|
||||
"refId": "C"
|
||||
@@ -11069,7 +11069,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Notified",
|
||||
"refId": "A"
|
||||
@@ -11078,7 +11078,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Remote ping",
|
||||
"refId": "B"
|
||||
@@ -11087,7 +11087,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Total updates",
|
||||
"refId": "C"
|
||||
@@ -11096,7 +11096,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Remote updates",
|
||||
"refId": "D"
|
||||
@@ -11105,7 +11105,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Bump active time",
|
||||
"refId": "E"
|
||||
@@ -11789,7 +11789,7 @@
|
||||
"name": "instance",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, instance)",
|
||||
"refId": "Prometheus-instance-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11818,7 +11818,7 @@
|
||||
"name": "job",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, job)",
|
||||
"refId": "Prometheus-job-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11848,7 +11848,7 @@
|
||||
"name": "index",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, index)",
|
||||
"refId": "Prometheus-index-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11896,6 +11896,6 @@
|
||||
"timezone": "",
|
||||
"title": "Synapse",
|
||||
"uid": "000000012",
|
||||
"version": 132,
|
||||
"version": 133,
|
||||
"weekStart": ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,55 +1,35 @@
|
||||
groups:
|
||||
- name: synapse
|
||||
rules:
|
||||
- record: "synapse_federation_transaction_queue_pendingEdus:total"
|
||||
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
|
||||
- record: "synapse_federation_transaction_queue_pendingPdus:total"
|
||||
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
|
||||
- record: 'synapse_http_server_request_count:method'
|
||||
labels:
|
||||
servlet: ""
|
||||
expr: "sum(synapse_http_server_request_count) by (method)"
|
||||
- record: 'synapse_http_server_request_count:servlet'
|
||||
labels:
|
||||
method: ""
|
||||
expr: 'sum(synapse_http_server_request_count) by (servlet)'
|
||||
|
||||
- record: 'synapse_http_server_request_count:total'
|
||||
labels:
|
||||
servlet: ""
|
||||
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
|
||||
|
||||
- record: 'synapse_cache:hit_ratio_5m'
|
||||
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
|
||||
- record: 'synapse_cache:hit_ratio_30s'
|
||||
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
|
||||
|
||||
# These 3 rules are used in the included Prometheus console
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "EDU"
|
||||
expr: 'synapse_federation_client_sent_edus + 0'
|
||||
expr: 'synapse_federation_client_sent_edus_total + 0'
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
|
||||
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "Query"
|
||||
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
|
||||
|
||||
# These 3 rules are used in the included Prometheus console
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "EDU"
|
||||
expr: 'synapse_federation_server_received_edus + 0'
|
||||
expr: 'synapse_federation_server_received_edus_total + 0'
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_server_received_pdus + 0'
|
||||
expr: 'synapse_federation_server_received_pdus_total + 0'
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "Query"
|
||||
expr: 'sum(synapse_federation_server_received_queries) by (job)'
|
||||
|
||||
# These 2 rules are used in the included Prometheus console
|
||||
- record: 'synapse_federation_transaction_queue_pending'
|
||||
labels:
|
||||
type: "EDU"
|
||||
@@ -59,20 +39,25 @@ groups:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
|
||||
|
||||
# These 3 rules are used in the included Grafana dashboard
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
|
||||
labels:
|
||||
type: remote
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: local
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: bridges
|
||||
- record: synapse_storage_events_persisted_by_event_type
|
||||
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
|
||||
- record: synapse_storage_events_persisted_by_origin
|
||||
expr: sum without(type) (synapse_storage_events_persisted_events_sep)
|
||||
|
||||
# This rule is used in the included Grafana dashboard
|
||||
- record: synapse_storage_events_persisted_by_event_type
|
||||
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
|
||||
|
||||
# This rule is used in the included Grafana dashboard
|
||||
- record: synapse_storage_events_persisted_by_origin
|
||||
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
|
||||
|
||||
|
||||
debian/build_virtualenv (7 changes, vendored)

@@ -61,7 +61,7 @@ dh_virtualenv \
    --extras="all,systemd,test" \
    --requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

@@ -78,9 +78,14 @@ case "$DEB_BUILD_OPTIONS" in

        cp -r tests "$tmpdir"

        # To avoid pulling in the unbuilt Synapse in the local directory
        pushd /

        PYTHONPATH="$tmpdir" \
            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

        popd

        ;;
esac
debian/changelog (4 changes, vendored)

@@ -12,11 +12,15 @@ matrix-synapse-py3 (1.66.0) stable; urgency=medium

matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium

  [ Jörg Behrmann ]
  * Update debhelper to compatibility level 12.
  * Drop the preinst script stopping synapse.
  * Allocate a group for the system user.
  * Change dpkg-statoverride to --force-statoverride-add.

  [ Erik Johnston ]
  * Disable `dh_auto_configure` as it broke during Rust build.

 -- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100

matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium
debian/rules (2 changes, vendored)

@@ -12,6 +12,8 @@ override_dh_installsystemd:
# we don't really want to strip the symbols from our object files.
override_dh_strip:

override_dh_auto_configure:

# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
@@ -92,11 +92,20 @@ RUN \
    libxml++2.6-dev \
    libxslt1-dev \
    openssl \
    rustc \
    zlib1g-dev \
    git \
    curl \
    && rm -rf /var/lib/apt/lists/*


# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable

# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source

@@ -108,8 +117,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \

# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
COPY pyproject.toml README.rst build_rust.py /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -72,6 +72,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    && env DEBIAN_FRONTEND=noninteractive apt-get install \
    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
        build-essential \
        curl \
        debhelper \
        devscripts \
        libsystemd-dev \
@@ -85,6 +86,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
        libpq-dev \
        xmlsec1

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable


COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /

# install dhvirtualenv. Update the apt cache again first, in case we got a
@@ -393,6 +393,151 @@ A response body like the following is returned:
}
```

# Room Messages API

The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).

This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to fetch messages from.

The following query parameters are available:

* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
  or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.

**Response**

The following fields are possible in the JSON response body:

* `chunk` - A list of room events. The order depends on the dir parameter.
  Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
  If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.

**Example**

For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
```json
{
  "chunk": [
    {
      "content": {
        "body": "This is an example text message",
        "format": "org.matrix.custom.html",
        "formatted_body": "<b>This is an example text message</b>",
        "msgtype": "m.text"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "name": "The room name"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "state_key": "",
      "type": "m.room.name",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "body": "Gangnam Style",
        "info": {
          "duration": 2140786,
          "h": 320,
          "mimetype": "video/mp4",
          "size": 1563685,
          "thumbnail_info": {
            "h": 300,
            "mimetype": "image/jpeg",
            "size": 46144,
            "w": 300
          },
          "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
          "w": 480
        },
        "msgtype": "m.video",
        "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    }
  ],
  "end": "t47409-4357353_219380_26003_2265",
  "start": "t47429-4392820_219380_26003_2265"
}
```
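A minimal sketch of how the pagination described above could be scripted in Python (the homeserver URL, admin token and starting token are placeholders; the `requests` library is assumed to be available):

```python
import requests

HOMESERVER = "https://synapse.example.com"  # placeholder homeserver base URL
ADMIN_TOKEN = "<admin access_token>"        # placeholder admin access token
ROOM_ID = "!636q39766251:example.com"

def fetch_room_messages(from_token: str) -> list:
    """Page through /messages, following `end` tokens until none is returned."""
    events = []
    token = from_token
    while token is not None:
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/messages",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
            params={"from": token, "dir": "f", "limit": 100},
            timeout=30,
        )
        resp.raise_for_status()
        body = resp.json()
        events.extend(body.get("chunk", []))
        # `end` is omitted from the response once no further events are available.
        token = body.get("end")
    return events
```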
# Room Timestamp to Event API

The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).

Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to check.

The following query parameters are available:

* `ts` - a timestamp in milliseconds where we will find the closest event in
  the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
  given timestamp. Defaults to `f`.

**Response**

* `event_id` - The ID of the event closest to the given timestamp in the given direction.
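A minimal usage sketch for this endpoint (placeholder homeserver, token and room ID; `requests` assumed available):

```python
import requests

HOMESERVER = "https://synapse.example.com"  # placeholder homeserver base URL
ADMIN_TOKEN = "<admin access_token>"        # placeholder admin access token
ROOM_ID = "!636q39766251:example.com"

# Find the closest event at or after 2022-01-01 00:00:00 UTC (ts is in milliseconds).
resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/timestamp_to_event",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    params={"ts": 1640995200000, "dir": "f"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["event_id"])
```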
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

@@ -42,6 +42,7 @@ It returns a JSON body like the following:
    "appservice_id": null,
    "consent_server_notice_sent": null,
    "consent_version": null,
    "consent_ts": null,
    "external_ids": [
        {
            "auth_provider": "<provider1>",
@@ -364,6 +365,7 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)

## Reset password
@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.
Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.


Policy
@@ -17,6 +17,14 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).

A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context
-------
@@ -31,3 +39,15 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.

For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bumps its minimum supported Rust version frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.

On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.
@@ -28,6 +28,9 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).


# 3. Get the source.

@@ -114,6 +117,11 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

When changes are made to any Rust code then you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
is quicker than `poetry install`, so is recommended when making frequent
changes to the Rust code.


# 8. Test, test, test!
<a name="test-test-test"></a>
@@ -195,7 +203,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```

Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
@@ -196,6 +196,10 @@ System requirements:
- Python 3.7 or later, up to Python 3.10.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are
unavailable, you will need to have a recent Rust compiler installed. The easiest
way of installing the latest version is to use [rustup](https://rustup.rs/).

To install the Synapse homeserver run:

```sh

@@ -1069,8 +1069,10 @@ Options related to caching.
---
### `event_cache_size`

The number of events to cache in memory. Not affected by
`caches.global_factor` and is not part of the `caches` section. Defaults to 10K.
The number of events to cache in memory. Defaults to 10K. Like other caches,
this is affected by `caches.global_factor` (see below).

Note that this option is not part of the `caches` section.

Example configuration:
```yaml
mypy.ini (6 changes)

@@ -16,7 +16,8 @@ files =
  docker/,
  scripts-dev/,
  synapse/,
  tests/
  tests/,
  build_rust.py

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329

@@ -181,3 +182,6 @@ ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

[mypy-setuptools_rust.*]
ignore_missing_imports = True
35
poetry.lock
generated
35
poetry.lock
generated
@@ -1035,6 +1035,18 @@ python-versions = ">=3.6"
|
||||
cryptography = ">=2.0"
|
||||
jeepney = ">=0.6"
|
||||
|
||||
[[package]]
|
||||
name = "semantic-version"
|
||||
version = "2.10.0"
|
||||
description = "A library implementing the 'SemVer' scheme."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=2.7"
|
||||
|
||||
[package.extras]
|
||||
dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
|
||||
doc = ["Sphinx", "sphinx-rtd-theme"]
|
||||
|
||||
[[package]]
|
||||
name = "sentry-sdk"
|
||||
version = "1.5.11"
|
||||
@@ -1099,6 +1111,19 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-g
|
||||
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
|
||||
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
|
||||
|
||||
[[package]]
|
||||
name = "setuptools-rust"
|
||||
version = "1.5.1"
|
||||
description = "Setuptools Rust extension plugin"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
semantic-version = ">=2.8.2,<3"
|
||||
setuptools = ">=62.4"
|
||||
typing-extensions = ">=3.7.4.3"
|
||||
|
||||
[[package]]
|
||||
name = "signedjson"
|
||||
version = "1.1.4"
|
||||
@@ -1600,7 +1625,7 @@ url_preview = ["lxml"]
|
||||
[metadata]
|
||||
lock-version = "1.1"
|
||||
python-versions = "^3.7.1"
|
||||
content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d"
|
||||
content-hash = "79cfa09d59f9f8b5ef24318fb860df1915f54328692aa56d04331ecbdd92a8cb"
|
||||
|
||||
[metadata.files]
|
||||
attrs = [
|
||||
@@ -2472,6 +2497,10 @@ secretstorage = [
|
||||
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
|
||||
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
|
||||
]
|
||||
semantic-version = [
|
||||
{file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
|
||||
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
|
||||
]
|
||||
sentry-sdk = [
|
||||
{file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
|
||||
{file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
|
||||
@@ -2484,6 +2513,10 @@ setuptools = [
|
||||
{file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
|
||||
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
|
||||
]
|
||||
setuptools-rust = [
|
||||
{file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
|
||||
{file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
|
||||
]
|
||||
signedjson = [
|
||||
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
|
||||
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},
|
||||
|
||||
@@ -52,6 +52,9 @@ include_trailing_comma = true
|
||||
combine_as_imports = true
|
||||
skip_gitignore = true
|
||||
|
||||
[tool.maturin]
|
||||
manifest-path = "rust/Cargo.toml"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.66.0"
|
||||
@@ -82,7 +85,16 @@ include = [
|
||||
{ path = "sytest-blacklist", format = "sdist" },
|
||||
{ path = "tests", format = "sdist" },
|
||||
{ path = "UPGRADE.rst", format = "sdist" },
|
||||
{ path = "Cargo.toml", format = "sdist" },
|
||||
{ path = "rust/Cargo.toml", format = "sdist" },
|
||||
{ path = "rust/Cargo.lock", format = "sdist" },
|
||||
{ path = "rust/src/**", format = "sdist" },
|
||||
]
|
||||
exclude = [
|
||||
{ path = "synapse/*.so", format = "sdist"}
|
||||
]
|
||||
|
||||
build = "build_rust.py"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
synapse_homeserver = "synapse.app.homeserver:main"
|
||||
@@ -126,7 +138,7 @@ pyOpenSSL = ">=16.0.0"
|
||||
PyYAML = ">=3.11"
|
||||
pyasn1 = ">=0.1.9"
|
||||
pyasn1-modules = ">=0.0.7"
|
||||
bcrypt = ">=3.1.0"
|
||||
bcrypt = ">=3.1.7"
|
||||
Pillow = ">=5.4.0"
|
||||
sortedcontainers = ">=1.4.4"
|
||||
pymacaroons = ">=0.13.0"
|
||||
@@ -161,6 +173,15 @@ importlib_metadata = { version = ">=1.4", python = "<3.8" }
|
||||
# This is the most recent version of Pydantic with available on common distros.
|
||||
pydantic = ">=1.7.4"
|
||||
|
||||
# This is for building the rust components during "poetry install", which
|
||||
# currently ignores the `build-system.requires` directive (c.f.
|
||||
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
|
||||
# `poetry build` do the right thing without this explicit dependency.
|
||||
#
|
||||
# This isn't really a dev-dependency, as `poetry install --no-dev` will fail,
|
||||
# but the alternative is to add it to the main list of deps where it isn't
|
||||
# needed.
|
||||
setuptools_rust = ">=1.3"
|
||||
|
||||
|
||||
# Optional Dependencies
|
||||
@@ -285,5 +306,21 @@ twine = "*"
|
||||
towncrier = ">=18.6.0rc1"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0"]
|
||||
requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
|
||||
[tool.cibuildwheel]
|
||||
# Skip unsupported platforms (by us or by Rust).
|
||||
skip = "cp36* *-musllinux_i686"
|
||||
|
||||
# We need a rust compiler
|
||||
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
|
||||
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
|
||||
|
||||
# For some reason if we don't manually clean the build directory we
|
||||
# can end up polluting the next build with a .so that is for the wrong
|
||||
# Python version.
|
||||
before-build = "rm -rf {project}/build"
|
||||
build-frontend = "build"
|
||||
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
|
||||
|
||||
rust/Cargo.toml (new file, 24 lines)

@@ -0,0 +1,24 @@
[package]
# We name the package `synapse` so that things like logging have the right
# logging target.
name = "synapse"

# Dummy version. See pyproject.toml for Synapse's version number.
version = "0.1.0"

edition = "2021"
rust-version = "1.61.0"

[lib]
name = "synapse"
crate-type = ["cdylib"]

[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"

[dependencies]
intrusive-collections = "0.9.4"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }
rust/src/lib.rs (new file, 19 lines)

@@ -0,0 +1,19 @@
use pyo3::prelude::*;

mod lru_cache;

/// Formats the sum of two numbers as string.
#[pyfunction]
#[pyo3(text_signature = "(a, b, /)")]
fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
    Ok((a + b).to_string())
}

/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;

    lru_cache::register_module(py, m)?;
    Ok(())
}
rust/src/lru_cache.rs (new file, 236 lines)

@@ -0,0 +1,236 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use intrusive_collections::{intrusive_adapter, LinkedListAtomicLink};
|
||||
use intrusive_collections::{LinkedList, LinkedListLink};
|
||||
use lazy_static::lazy_static;
|
||||
use log::error;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::PySet;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
|
||||
let child_module = PyModule::new(py, "push")?;
|
||||
child_module.add_class::<LruCacheNode>()?;
|
||||
child_module.add_class::<PerCacheLinkedList>()?;
|
||||
child_module.add_function(wrap_pyfunction!(get_global_list, m)?)?;
|
||||
|
||||
m.add_submodule(child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import push` work.
|
||||
py.import("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.lru_cache", child_module)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[pyclass]
|
||||
#[derive(Clone)]
|
||||
struct PerCacheLinkedList(Arc<Mutex<LinkedList<LruCacheNodeAdapterPerCache>>>);
|
||||
|
||||
#[pymethods]
|
||||
impl PerCacheLinkedList {
|
||||
#[new]
|
||||
fn new() -> PerCacheLinkedList {
|
||||
PerCacheLinkedList(Default::default())
|
||||
}
|
||||
|
||||
fn get_back(&self) -> Option<LruCacheNode> {
|
||||
let list = self.0.lock().expect("poisoned");
|
||||
list.back().clone_pointer().map(|n| LruCacheNode(n))
|
||||
}
|
||||
}
|
||||
|
||||
struct LruCacheNodeInner {
|
||||
per_cache_link: LinkedListAtomicLink,
|
||||
global_list_link: LinkedListAtomicLink,
|
||||
per_cache_list: Arc<Mutex<LinkedList<LruCacheNodeAdapterPerCache>>>,
|
||||
cache: Mutex<Option<PyObject>>,
|
||||
key: PyObject,
|
||||
value: Arc<Mutex<PyObject>>,
|
||||
callbacks: Py<PySet>,
|
||||
memory: usize,
|
||||
}
|
||||
|
||||
#[pyclass]
|
||||
struct LruCacheNode(Arc<LruCacheNodeInner>);
|
||||
|
||||
#[pymethods]
|
||||
impl LruCacheNode {
|
||||
#[new]
|
||||
fn py_new(
|
||||
cache: PyObject,
|
||||
cache_list: PerCacheLinkedList,
|
||||
key: PyObject,
|
||||
value: PyObject,
|
||||
callbacks: Py<PySet>,
|
||||
memory: usize,
|
||||
) -> Self {
|
||||
let node = Arc::new(LruCacheNodeInner {
|
||||
per_cache_link: Default::default(),
|
||||
global_list_link: Default::default(),
|
||||
per_cache_list: cache_list.0,
|
||||
cache: Mutex::new(Some(cache)),
|
||||
key,
|
||||
value: Arc::new(Mutex::new(value)),
|
||||
callbacks,
|
||||
memory,
|
||||
});
|
||||
|
||||
GLOBAL_LIST
|
||||
.lock()
|
||||
.expect("posioned")
|
||||
.push_front(node.clone());
|
||||
|
||||
node.per_cache_list
|
||||
.lock()
|
||||
.expect("posioned")
|
||||
.push_front(node.clone());
|
||||
|
||||
LruCacheNode(node)
|
||||
}
|
||||
|
||||
fn add_callbacks(&self, py: Python<'_>, new_callbacks: &PyAny) -> PyResult<()> {
|
||||
if new_callbacks.len()? == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let current_callbacks = self.0.callbacks.as_ref(py);
|
||||
|
||||
for cb in new_callbacks.iter()? {
|
||||
current_callbacks.add(cb?)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_and_clear_callbacks(&self, py: Python<'_>) {
|
||||
let callbacks = self.0.callbacks.as_ref(py);
|
||||
|
||||
if callbacks.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for callback in callbacks {
|
||||
if let Err(err) = callback.call0() {
|
||||
error!("LruCacheNode callback errored: {err}");
|
||||
}
|
||||
}
|
||||
|
||||
callbacks.clear();
|
||||
}
|
||||
|
||||
fn drop_from_cache(&self) -> PyResult<()> {
|
||||
let cache = self.0.cache.lock().expect("poisoned").take();
|
||||
|
||||
if let Some(cache) = cache {
|
||||
Python::with_gil(|py| cache.call_method1(py, "pop", (&self.0.key, None::<()>)))?;
|
||||
}
|
||||
|
||||
self.drop_from_lists();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn drop_from_lists(&self) {
|
||||
if self.0.global_list_link.is_linked() {
|
||||
let mut glboal_list = GLOBAL_LIST.lock().expect("poisoned");
|
||||
|
||||
let mut curor_mut = unsafe {
|
||||
// Getting the cursor is unsafe as we need to ensure the list link
|
||||
// belongs to the given list.
|
||||
glboal_list.cursor_mut_from_ptr(Arc::into_raw(self.0.clone()))
|
||||
};
|
||||
|
||||
curor_mut.remove();
|
||||
}
|
||||
|
||||
if self.0.per_cache_link.is_linked() {
|
||||
let mut per_cache_list = self.0.per_cache_list.lock().expect("poisoned");
|
||||
|
||||
let mut curor_mut = unsafe {
|
||||
// Getting the cursor is unsafe as we need to ensure the list link
|
||||
// belongs to the given list.
|
||||
per_cache_list.cursor_mut_from_ptr(Arc::into_raw(self.0.clone()))
|
||||
};
|
||||
|
||||
curor_mut.remove();
|
||||
}
|
||||
}
|
||||
|
||||
fn move_to_front(&self) {
|
||||
if self.0.global_list_link.is_linked() {
|
||||
let mut global_list = GLOBAL_LIST.lock().expect("poisoned");
|
||||
|
||||
let mut curor_mut = unsafe {
|
||||
// Getting the cursor is unsafe as we need to ensure the list link
|
||||
// belongs to the given list.
|
||||
global_list.cursor_mut_from_ptr(Arc::into_raw(self.0.clone()))
|
||||
};
|
||||
curor_mut.remove();
|
||||
|
||||
global_list.push_front(self.0.clone());
|
||||
}
|
||||
|
||||
if self.0.per_cache_link.is_linked() {
|
||||
let mut per_cache_list = self.0.per_cache_list.lock().expect("poisoned");
|
||||
|
||||
let mut curor_mut = unsafe {
|
||||
// Getting the cursor is unsafe as we need to ensure the list link
|
||||
// belongs to the given list.
|
||||
per_cache_list.cursor_mut_from_ptr(Arc::into_raw(self.0.clone()))
|
||||
};
|
||||
|
||||
curor_mut.remove();
|
||||
|
||||
per_cache_list.push_front(self.0.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[getter]
|
||||
fn key(&self) -> &PyObject {
|
||||
&self.0.key
|
||||
}
|
||||
|
||||
#[getter]
|
||||
fn value(&self) -> PyObject {
|
||||
self.0.value.lock().expect("poisoned").clone()
|
||||
}
|
||||
|
||||
#[setter]
|
||||
fn set_value(&self, value: PyObject) {
|
||||
*self.0.value.lock().expect("poisoned") = value
|
||||
}
|
||||
|
||||
#[getter]
|
||||
fn memory(&self) -> usize {
|
||||
self.0.memory
|
||||
}
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
fn get_global_list() -> Vec<LruCacheNode> {
|
||||
let list = GLOBAL_LIST.lock().expect("poisoned");
|
||||
|
||||
let mut vec = Vec::new();
|
||||
|
||||
let mut cursor = list.front();
|
||||
|
||||
while let Some(n) = cursor.clone_pointer() {
|
||||
vec.push(LruCacheNode(n));
|
||||
|
||||
cursor.move_next();
|
||||
}
|
||||
|
||||
vec
|
||||
}
|
||||
|
||||
intrusive_adapter!(LruCacheNodeAdapterPerCache = Arc<LruCacheNodeInner>: LruCacheNodeInner { per_cache_link: LinkedListLink });
|
||||
intrusive_adapter!(LruCacheNodeAdapterGlobal = Arc<LruCacheNodeInner>: LruCacheNodeInner { global_list_link: LinkedListLink });
|
||||
|
||||
lazy_static! {
|
||||
static ref GLOBAL_LIST_ADAPTER: LruCacheNodeAdapterGlobal = LruCacheNodeAdapterGlobal::new();
|
||||
static ref GLOBAL_LIST: Arc<Mutex<LinkedList<LruCacheNodeAdapterGlobal>>> =
|
||||
Arc::new(Mutex::new(LinkedList::new(GLOBAL_LIST_ADAPTER.clone())));
|
||||
}
|
||||
stubs/synapse/__init__.pyi (new, empty file)

stubs/synapse/synapse_rust.pyi (new file, 1 line)

@@ -0,0 +1 @@
def sum_as_string(a: int, b: int) -> str: ...
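A quick sanity check of the new extension module, mirroring the `cibuildwheel` test command used on this branch (a sketch; it assumes the Rust code has been built, for example via `poetry install` or `maturin develop`, so that `synapse.synapse_rust` is importable):

```python
# The Rust-backed module is exposed as `synapse.synapse_rust`.
from synapse.synapse_rust import sum_as_string

# sum_as_string() is implemented in rust/src/lib.rs and returns a string.
print(sum_as_string(1, 2))  # -> "3"
```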
@@ -38,6 +38,7 @@ from synapse.logging.opentracing import (
    trace,
)
from synapse.types import Requester, create_requester
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -118,6 +119,7 @@ class Auth:
            errcode=Codes.NOT_JOINED,
        )

    @cancellable
    async def get_user_by_req(
        self,
        request: SynapseRequest,
@@ -166,6 +168,7 @@ class Auth:
            parent_span.set_tag("appservice_id", requester.app_service.id)
        return requester

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
        request: SynapseRequest,
@@ -281,6 +284,7 @@ class Auth:
                403, "Application service has not registered this user (%s)" % user_id
            )

    @cancellable
    async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
        """
        Given a request, reads the request parameters to determine:
@@ -523,6 +527,7 @@ class Auth:
        return bool(query_params) or bool(auth_headers)

    @staticmethod
    @cancellable
    def get_access_token_from_request(request: Request) -> str:
        """Extracts the access_token from the request.
@@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = {


@FormatChecker.cls_checks("matrix_room_id")
def matrix_room_id_validator(room_id_str: str) -> bool:
    return RoomID.is_valid(room_id_str)
def matrix_room_id_validator(room_id: object) -> bool:
    return isinstance(room_id, str) and RoomID.is_valid(room_id)


@FormatChecker.cls_checks("matrix_user_id")
def matrix_user_id_validator(user_id_str: str) -> bool:
    return UserID.is_valid(user_id_str)
def matrix_user_id_validator(user_id: object) -> bool:
    return isinstance(user_id, str) and UserID.is_valid(user_id)


class Filtering:
@@ -19,18 +19,23 @@ import attr

class EventFormatVersions:
    """This is an internal enum for tracking the version of the event format,
    independently from the room version.
    independently of the room version.

    To reduce confusion, the event format versions are named after the room
    versions that they were used or introduced in.
    The concept of an 'event format version' is specific to Synapse (the
    specification does not mention this term.)
    """

    V1 = 1  # $id:server event id format
    V2 = 2  # MSC1659-style $hash event id format: introduced for room v3
    V3 = 3  # MSC1884-style $hash format: introduced for room v4
    ROOM_V1_V2 = 1  # $id:server event id format: used for room v1 and v2
    ROOM_V3 = 2  # MSC1659-style $hash event id format: used for room v3
    ROOM_V4_PLUS = 3  # MSC1884-style $hash format: introduced for room v4
|
||||
|
||||
KNOWN_EVENT_FORMAT_VERSIONS = {
|
||||
EventFormatVersions.V1,
|
||||
EventFormatVersions.V2,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V1_V2,
|
||||
EventFormatVersions.ROOM_V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
}
|
||||
|
||||
|
||||
@@ -92,7 +97,7 @@ class RoomVersions:
|
||||
V1 = RoomVersion(
|
||||
"1",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V1,
|
||||
EventFormatVersions.ROOM_V1_V2,
|
||||
StateResolutionVersions.V1,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
@@ -110,7 +115,7 @@ class RoomVersions:
|
||||
V2 = RoomVersion(
|
||||
"2",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V1,
|
||||
EventFormatVersions.ROOM_V1_V2,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
@@ -128,7 +133,7 @@ class RoomVersions:
|
||||
V3 = RoomVersion(
|
||||
"3",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V2,
|
||||
EventFormatVersions.ROOM_V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
@@ -146,7 +151,7 @@ class RoomVersions:
|
||||
V4 = RoomVersion(
|
||||
"4",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=False,
|
||||
special_case_aliases_auth=True,
|
||||
@@ -164,7 +169,7 @@ class RoomVersions:
|
||||
V5 = RoomVersion(
|
||||
"5",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=True,
|
||||
@@ -182,7 +187,7 @@ class RoomVersions:
|
||||
V6 = RoomVersion(
|
||||
"6",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -200,7 +205,7 @@ class RoomVersions:
|
||||
MSC2176 = RoomVersion(
|
||||
"org.matrix.msc2176",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -218,7 +223,7 @@ class RoomVersions:
|
||||
V7 = RoomVersion(
|
||||
"7",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -236,7 +241,7 @@ class RoomVersions:
|
||||
V8 = RoomVersion(
|
||||
"8",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -254,7 +259,7 @@ class RoomVersions:
|
||||
V9 = RoomVersion(
|
||||
"9",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -272,7 +277,7 @@ class RoomVersions:
|
||||
MSC3787 = RoomVersion(
|
||||
"org.matrix.msc3787",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -290,7 +295,7 @@ class RoomVersions:
|
||||
V10 = RoomVersion(
|
||||
"10",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
@@ -308,7 +313,7 @@ class RoomVersions:
|
||||
MSC2716v4 = RoomVersion(
|
||||
"org.matrix.msc2716v4",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
|
||||
@@ -32,15 +32,15 @@ logger = logging.getLogger("synapse.app.homeserver")
|
||||
_stats_process: List[Tuple[int, "resource.struct_rusage"]] = []
|
||||
|
||||
# Gauges to expose monthly active user control metrics
|
||||
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
|
||||
current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU")
|
||||
current_mau_by_service_gauge = Gauge(
|
||||
"synapse_admin_mau_current_mau_by_service",
|
||||
"Current MAU by service",
|
||||
["app_service"],
|
||||
)
|
||||
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
|
||||
max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit")
|
||||
registered_reserved_users_mau_gauge = Gauge(
|
||||
"synapse_admin_mau:registered_reserved_users",
|
||||
"synapse_admin_mau_registered_reserved_users",
|
||||
"Registered users with reserved threepids",
|
||||
)
|
||||
|
||||
|
||||
@@ -217,7 +217,18 @@ class KeyConfig(Config):

        signing_keys = self.read_file(signing_key_path, name)
        try:
            return read_signing_keys(signing_keys.splitlines(True))
            loaded_signing_keys = read_signing_keys(
                [
                    signing_key_line
                    for signing_key_line in signing_keys.splitlines(keepends=False)
                    if signing_key_line.strip()
                ]
            )

            if not loaded_signing_keys:
                raise ConfigError(f"No signing keys in file {signing_key_path}")

            return loaded_signing_keys
        except Exception as e:
            raise ConfigError("Error reading %s: %s" % (name, str(e)))
|
||||
|
||||
@@ -109,7 +109,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
|
||||
if not is_invite_via_3pid:
|
||||
raise AuthError(403, "Event not signed by sender's server")
|
||||
|
||||
if event.format_version in (EventFormatVersions.V1,):
|
||||
if event.format_version in (EventFormatVersions.ROOM_V1_V2,):
|
||||
# Only older room versions have event IDs to check.
|
||||
event_id_domain = get_domain_from_id(event.event_id)
|
||||
|
||||
@@ -716,7 +716,7 @@ def check_redaction(
|
||||
if user_level >= redact_level:
|
||||
return False
|
||||
|
||||
if room_version_obj.event_format == EventFormatVersions.V1:
|
||||
if room_version_obj.event_format == EventFormatVersions.ROOM_V1_V2:
|
||||
redacter_domain = get_domain_from_id(event.event_id)
|
||||
if not isinstance(event.redacts, str):
|
||||
return False
|
||||
|
||||
@@ -442,7 +442,7 @@ class EventBase(metaclass=abc.ABCMeta):
|
||||
|
||||
|
||||
class FrozenEvent(EventBase):
|
||||
format_version = EventFormatVersions.V1 # All events of this type are V1
|
||||
format_version = EventFormatVersions.ROOM_V1_V2 # All events of this type are V1
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -490,7 +490,7 @@ class FrozenEvent(EventBase):
|
||||
|
||||
|
||||
class FrozenEventV2(EventBase):
|
||||
format_version = EventFormatVersions.V2 # All events of this type are V2
|
||||
format_version = EventFormatVersions.ROOM_V3 # All events of this type are V2
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -567,7 +567,7 @@ class FrozenEventV2(EventBase):
|
||||
class FrozenEventV3(FrozenEventV2):
|
||||
"""FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
|
||||
|
||||
format_version = EventFormatVersions.V3 # All events of this type are V3
|
||||
format_version = EventFormatVersions.ROOM_V4_PLUS # All events of this type are V3
|
||||
|
||||
@property
|
||||
def event_id(self) -> str:
|
||||
@@ -597,11 +597,11 @@ def _event_type_from_format_version(
|
||||
`FrozenEvent`
|
||||
"""
|
||||
|
||||
if format_version == EventFormatVersions.V1:
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
return FrozenEvent
|
||||
elif format_version == EventFormatVersions.V2:
|
||||
elif format_version == EventFormatVersions.ROOM_V3:
|
||||
return FrozenEventV2
|
||||
elif format_version == EventFormatVersions.V3:
|
||||
elif format_version == EventFormatVersions.ROOM_V4_PLUS:
|
||||
return FrozenEventV3
|
||||
else:
|
||||
raise Exception("No event format %r" % (format_version,))
|
||||
|
||||
@@ -137,7 +137,7 @@ class EventBuilder:
|
||||
# The types of auth/prev events changes between event versions.
|
||||
prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
|
||||
auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
|
||||
if format_version == EventFormatVersions.V1:
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
auth_events = await self._store.add_event_hashes(auth_event_ids)
|
||||
prev_events = await self._store.add_event_hashes(prev_event_ids)
|
||||
else:
|
||||
@@ -253,7 +253,7 @@ def create_local_event_from_event_dict(
|
||||
|
||||
time_now = int(clock.time_msec())
|
||||
|
||||
if format_version == EventFormatVersions.V1:
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
event_dict["event_id"] = _create_event_id(clock, hostname)
|
||||
|
||||
event_dict["origin"] = hostname
|
||||
|
||||
@@ -45,7 +45,7 @@ class EventValidator:
|
||||
"""
|
||||
self.validate_builder(event)
|
||||
|
||||
if event.format_version == EventFormatVersions.V1:
|
||||
if event.format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
EventID.from_string(event.event_id)
|
||||
|
||||
required = [
|
||||
|
||||
@@ -194,7 +194,7 @@ async def _check_sigs_on_pdu(
|
||||
# event id's domain (normally only the case for joins/leaves), and add additional
|
||||
# checks. Only do this if the room version has a concept of event ID domain
|
||||
# (ie, the room version uses old-style non-hash event IDs).
|
||||
if room_version.event_format == EventFormatVersions.V1:
|
||||
if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
|
||||
event_domain = get_domain_from_id(pdu.event_id)
|
||||
if event_domain != sender_domain:
|
||||
try:
|
||||
|
||||
@@ -1190,7 +1190,7 @@ class FederationClient(FederationBase):
|
||||
# Otherwise, consider it a legitimate error and raise.
|
||||
err = e.to_synapse_error()
|
||||
if self._is_unknown_endpoint(e, err):
|
||||
if room_version.event_format != EventFormatVersions.V1:
|
||||
if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"User's homeserver does not support this room version",
|
||||
|
||||
@@ -62,12 +62,12 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count",
    "synapse_federation_client_sent_pdu_destinations_count",
    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total",
    "synapse_federation_client_sent_pdu_destinations",
    "Total number of PDUs queued for sending across all destinations",
)
|
||||
|
||||
@@ -70,6 +70,7 @@ class AdminHandler:
|
||||
"appservice_id",
|
||||
"consent_server_notice_sent",
|
||||
"consent_version",
|
||||
"consent_ts",
|
||||
"user_type",
|
||||
"is_guest",
|
||||
}
|
||||
|
||||
@@ -52,6 +52,7 @@ from synapse.types import (
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
@@ -124,6 +125,7 @@ class DeviceWorkerHandler:
|
||||
|
||||
return device
|
||||
|
||||
@cancellable
|
||||
async def get_device_changes_in_shared_rooms(
|
||||
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
|
||||
) -> Collection[str]:
|
||||
@@ -163,6 +165,7 @@ class DeviceWorkerHandler:
|
||||
|
||||
@trace
|
||||
@measure_func("device.get_user_ids_changed")
|
||||
@cancellable
|
||||
async def get_user_ids_changed(
|
||||
self, user_id: str, from_token: StreamToken
|
||||
) -> JsonDict:
|
||||
|
||||
@@ -37,7 +37,8 @@ from synapse.types import (
|
||||
get_verify_key_from_cross_signing_key,
|
||||
)
|
||||
from synapse.util import json_decoder, unwrapFirstError
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.async_helpers import Linearizer, delay_cancellation
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -91,6 +92,7 @@ class E2eKeysHandler:
|
||||
)
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_devices(
|
||||
self,
|
||||
query_body: JsonDict,
|
||||
@@ -208,22 +210,26 @@ class E2eKeysHandler:
|
||||
r[user_id] = remote_queries[user_id]
|
||||
|
||||
# Now fetch any devices that we don't have in our cache
|
||||
# TODO It might make sense to propagate cancellations into the
|
||||
# deferreds which are querying remote homeservers.
|
||||
await make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(
|
||||
self._query_devices_for_destination,
|
||||
results,
|
||||
cross_signing_keys,
|
||||
failures,
|
||||
destination,
|
||||
queries,
|
||||
timeout,
|
||||
)
|
||||
for destination, queries in remote_queries_not_in_cache.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
delay_cancellation(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(
|
||||
self._query_devices_for_destination,
|
||||
results,
|
||||
cross_signing_keys,
|
||||
failures,
|
||||
destination,
|
||||
queries,
|
||||
timeout,
|
||||
)
|
||||
for destination, queries in remote_queries_not_in_cache.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)
|
||||
)
|
||||
|
||||
ret = {"device_keys": results, "failures": failures}
|
||||
@@ -347,6 +353,7 @@ class E2eKeysHandler:
|
||||
|
||||
return
|
||||
|
||||
@cancellable
|
||||
async def get_cross_signing_keys_from_cache(
|
||||
self, query: Iterable[str], from_user_id: Optional[str]
|
||||
) -> Dict[str, Dict[str, dict]]:
|
||||
@@ -393,6 +400,7 @@ class E2eKeysHandler:
|
||||
}
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_local_devices(
|
||||
self, query: Mapping[str, Optional[List[str]]]
|
||||
) -> Dict[str, Dict[str, dict]]:
|
||||
|
||||
@@ -26,6 +26,7 @@ from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType

@@ -423,6 +424,7 @@ class PaginationHandler:
pagin_config: PaginationConfig,
as_client_event: bool = True,
event_filter: Optional[Filter] = None,
use_admin_priviledge: bool = False,
) -> JsonDict:
"""Get messages in a room.

@@ -432,10 +434,16 @@ class PaginationHandler:
pagin_config: The pagination config rules to apply, if any.
as_client_event: True to get events in client-server format.
event_filter: Filter to apply to results or None
use_admin_priviledge: if `True`, return all events, regardless
of whether `user` has access to them. To be used **ONLY**
from the admin API.

Returns:
Pagination API results
"""
if use_admin_priviledge:
await assert_user_is_admin(self.auth, requester)

user_id = requester.user.to_string()

if pagin_config.from_token:

@@ -458,12 +466,14 @@ class PaginationHandler:
room_token = from_token.room_key

async with self.pagination_lock.read(room_id):
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, requester, allow_departed_users=True
)
(membership, member_event_id) = (None, None)
if not use_admin_priviledge:
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, requester, allow_departed_users=True
)

if pagin_config.direction == "b":
# if we're going backwards, we might need to backfill. This

@@ -475,7 +485,7 @@ class PaginationHandler:
room_id, room_token.stream
)

if membership == Membership.LEAVE:
if not use_admin_priviledge and membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.

@@ -528,12 +538,13 @@ class PaginationHandler:
if event_filter:
events = await event_filter.filter(events)

events = await filter_events_for_client(
self._storage_controllers,
user_id,
events,
is_peeking=(member_event_id is None),
)
if not use_admin_priviledge:
events = await filter_events_for_client(
self._storage_controllers,
user_id,
events,
is_peeking=(member_event_id is None),
)

# if after the filter applied there are no more events
# return immediately - but there might be more in next_token batch

@@ -453,7 +453,6 @@ class RoomSummaryHandler:
"type": e.type,
"state_key": e.state_key,
"content": e.content,
"room_id": e.room_id,
"sender": e.sender,
"origin_server_ts": e.origin_server_ts,
}

@@ -15,6 +15,7 @@ import itertools
import logging
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Collection,
Dict,

@@ -1413,10 +1414,10 @@ class SyncHandler:
async def _generate_sync_entry_for_device_list(
self,
sync_result_builder: "SyncResultBuilder",
newly_joined_rooms: Set[str],
newly_joined_or_invited_or_knocked_users: Set[str],
newly_left_rooms: Set[str],
newly_left_users: Set[str],
newly_joined_rooms: AbstractSet[str],
newly_joined_or_invited_or_knocked_users: AbstractSet[str],
newly_left_rooms: AbstractSet[str],
newly_left_users: AbstractSet[str],
) -> DeviceListUpdates:
"""Generate the DeviceListUpdates section of sync

@@ -1434,8 +1435,7 @@ class SyncHandler:
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token

# We're going to mutate these fields, so lets copy them rather than
# assume they won't get used later.
# Take a copy since these fields will be mutated later.
newly_joined_or_invited_or_knocked_users = set(
newly_joined_or_invited_or_knocked_users
)

@@ -1635,8 +1635,8 @@ class SyncHandler:
async def _generate_sync_entry_for_presence(
self,
sync_result_builder: "SyncResultBuilder",
newly_joined_rooms: Set[str],
newly_joined_or_invited_users: Set[str],
newly_joined_rooms: AbstractSet[str],
newly_joined_or_invited_users: AbstractSet[str],
) -> None:
"""Generates the presence portion of the sync response. Populates the
`sync_result_builder` with the result.

@@ -1694,7 +1694,7 @@
self,
sync_result_builder: "SyncResultBuilder",
account_data_by_room: Dict[str, Dict[str, JsonDict]],
) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
) -> Tuple[AbstractSet[str], AbstractSet[str], AbstractSet[str], AbstractSet[str]]:
"""Generates the rooms portion of the sync response. Populates the
`sync_result_builder` with the result.

@@ -2534,7 +2534,7 @@ class SyncResultBuilder:
archived: List[ArchivedSyncResult] = attr.Factory(list)
to_device: List[JsonDict] = attr.Factory(list)

def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:
def calculate_user_changes(self) -> Tuple[AbstractSet[str], AbstractSet[str]]:
"""Work out which other users have joined or left rooms we are joined to.

This data only is only useful for an incremental sync.

@@ -28,7 +28,8 @@ from typing import (
overload,
)

from pydantic import BaseModel, ValidationError
from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError
from pydantic.error_wrappers import ErrorWrapper
from typing_extensions import Literal

from twisted.web.server import Request

@@ -714,7 +715,21 @@ def parse_and_validate_json_object_from_request(
try:
instance = model_type.parse_obj(content)
except ValidationError as e:
raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=Codes.BAD_JSON)
# Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a
# more specific error if possible (which occasionally helps us to be spec-
# compliant) This is a bit awkward because the spec's error codes aren't very
# clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM.
errcode = Codes.BAD_JSON

raw_errors = e.raw_errors
if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper):
raw_error = raw_errors[0].exc
if isinstance(raw_error, MissingError):
errcode = Codes.MISSING_PARAM
elif isinstance(raw_error, PydanticValueError):
errcode = Codes.INVALID_PARAM

raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode)

return instance
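Note: the error-code selection above relies on how pydantic v1 reports a missing required field. A minimal, hypothetical sketch (not part of this changeset; ExampleBody is illustrative) of the error shape that hunk inspects:

from pydantic import BaseModel, MissingError, StrictStr, ValidationError
from pydantic.error_wrappers import ErrorWrapper

class ExampleBody(BaseModel):
    user_id: StrictStr

try:
    ExampleBody.parse_obj({})  # "user_id" is absent
except ValidationError as e:
    wrapper = e.raw_errors[0]
    assert isinstance(wrapper, ErrorWrapper)
    # A single MissingError is what would map to Codes.MISSING_PARAM above.
    assert isinstance(wrapper.exc, MissingError)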
@@ -34,8 +34,6 @@ from prometheus_client.core import Sample
from twisted.web.resource import Resource
from twisted.web.server import Request

from synapse.util import caches

CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"

@@ -88,11 +86,16 @@ LEGACY_METRIC_NAMES = {
"synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
"synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
"synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
"synapse_util_caches_cache_total": "synapse_util_caches_cache:total",
"synapse_util_caches_cache": "synapse_util_caches_cache:total",
"synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache_total": "synapse_util_caches_response_cache:total",
"synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
"synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
"synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
"synapse_admin_mau_current": "synapse_admin_mau:current",
"synapse_admin_mau_max": "synapse_admin_mau:max",
"synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}

@@ -102,11 +105,6 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
by prometheus-client.
"""

# Trigger the cache metrics to be rescraped, which updates the common
# metrics but do not produce metrics themselves
for collector in caches.collectors_by_name.values():
collector.collect()

output = []

for metric in registry.collect():

@@ -61,9 +61,11 @@ from synapse.rest.admin.rooms import (
MakeRoomAdminRestServlet,
RoomEventContextServlet,
RoomMembersRestServlet,
RoomMessagesRestServlet,
RoomRestServlet,
RoomRestV2Servlet,
RoomStateRestServlet,
RoomTimestampToEventRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet

@@ -271,6 +273,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
DestinationResetConnectionRestServlet(hs).register(http_server)
DestinationRestServlet(hs).register(http_server)
ListDestinationsRestServlet(hs).register(http_server)
RoomMessagesRestServlet(hs).register(http_server)
RoomTimestampToEventRestServlet(hs).register(http_server)

# Some servlets only get registered for the main process.
if hs.config.worker.worker_app is None:

@@ -35,6 +35,7 @@ from synapse.rest.admin._base import (
)
from synapse.storage.databases.main.room import RoomSortOrder
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, RoomID, UserID, create_requester
from synapse.util import json_decoder

@@ -858,3 +859,106 @@ class BlockRoomRestServlet(RestServlet):
await self._store.unblock_room(room_id)

return HTTPStatus.OK, {"block": block}


class RoomMessagesRestServlet(RestServlet):
"""
Get messages list of a room.
"""

PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/messages$")

def __init__(self, hs: "HomeServer"):
self._hs = hs
self._clock = hs.get_clock()
self._pagination_handler = hs.get_pagination_handler()
self._auth = hs.get_auth()
self._store = hs.get_datastores().main

async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
await assert_user_is_admin(self._auth, requester)

pagination_config = await PaginationConfig.from_request(
self._store, request, default_limit=10
)
# Twisted will have processed the args by now.
assert request.args is not None
as_client_event = b"raw" not in request.args
filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
filter_json = urlparse.unquote(filter_str)
event_filter: Optional[Filter] = Filter(
self._hs, json_decoder.decode(filter_json)
)
if (
event_filter
and event_filter.filter_json.get("event_format", "client")
== "federation"
):
as_client_event = False
else:
event_filter = None

msgs = await self._pagination_handler.get_messages(
room_id=room_id,
requester=requester,
pagin_config=pagination_config,
as_client_event=as_client_event,
event_filter=event_filter,
use_admin_priviledge=True,
)

return HTTPStatus.OK, msgs


class RoomTimestampToEventRestServlet(RestServlet):
"""
API endpoint to fetch the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query
parameter).

Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.

`ts` is a timestamp in milliseconds where we will find the closest event in
the given direction.

`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.

GET /_synapse/admin/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""

PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/timestamp_to_event$")

def __init__(self, hs: "HomeServer"):
self._auth = hs.get_auth()
self._store = hs.get_datastores().main
self._timestamp_lookup_handler = hs.get_timestamp_lookup_handler()

async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
await assert_user_is_admin(self._auth, requester)

timestamp = parse_integer(request, "ts", required=True)
direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])

(
event_id,
origin_server_ts,
) = await self._timestamp_lookup_handler.get_event_for_timestamp(
requester, room_id, timestamp, direction
)

return HTTPStatus.OK, {
"event_id": event_id,
"origin_server_ts": origin_server_ts,
}
@@ -15,7 +15,7 @@
# limitations under the License.
import logging
import random
from typing import TYPE_CHECKING, Optional, Tuple
from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib.parse import urlparse

from pydantic import StrictBool, StrictStr, constr

@@ -41,7 +41,11 @@ from synapse.http.servlet import (
from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.rest.client.models import AuthenticationData, EmailRequestTokenBody
from synapse.rest.client.models import (
AuthenticationData,
EmailRequestTokenBody,
MsisdnRequestTokenBody,
)
from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn

@@ -400,23 +404,16 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()

async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
body, ["client_secret", "country", "phone_number", "send_attempt"]
body = parse_and_validate_json_object_from_request(
request, MsisdnRequestTokenBody
)
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)

country = body["country"]
phone_number = body["phone_number"]
send_attempt = body["send_attempt"]
next_link = body.get("next_link")  # Optional param

msisdn = phone_number_to_msisdn(country, phone_number)
msisdn = phone_number_to_msisdn(body.country, body.phone_number)

if not await check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
403,
# TODO: is this error message accurate? Looks like we've only rejected
# this phone number, not necessarily all phone numbers
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)

@@ -425,9 +422,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
request, "msisdn", msisdn
)

if next_link:
if body.next_link:
# Raise if the provided next_link value isn't valid
assert_valid_next_link(self.hs, next_link)
assert_valid_next_link(self.hs, body.next_link)

existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)

@@ -454,15 +451,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):

ret = await self.identity_handler.requestMsisdnToken(
self.hs.config.registration.account_threepid_delegate_msisdn,
country,
phone_number,
client_secret,
send_attempt,
next_link,
body.country,
body.phone_number,
body.client_secret,
body.send_attempt,
body.next_link,
)

threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe(
send_attempt
body.send_attempt
)

return 200, ret

@@ -845,17 +842,18 @@ class AccountStatusRestServlet(RestServlet):
self._auth = hs.get_auth()
self._account_handler = hs.get_account_handler()

class PostBody(RequestBodyModel):
# TODO: we could validate that each user id is an mxid here, and/or parse it
# as a UserID
user_ids: List[StrictStr]

async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self._auth.get_user_by_req(request)

body = parse_json_object_from_request(request)
if "user_ids" not in body:
raise SynapseError(
400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
)
body = parse_and_validate_json_object_from_request(request, self.PostBody)

statuses, failures = await self._account_handler.get_account_statuses(
body["user_ids"],
body.user_ids,
allow_remote=True,
)
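Note: a hedged example of a request body the new PostBody model accepts (the user ID is illustrative, and the sketch uses plain pydantic as a stand-in for RequestBodyModel); the manual "user_ids" presence check above becomes a validation error raised by the model instead:

from typing import List
from pydantic import BaseModel, StrictStr

class PostBody(BaseModel):  # stand-in for the RequestBodyModel subclass above
    user_ids: List[StrictStr]

body = PostBody.parse_obj({"user_ids": ["@alice:example.com"]})
assert body.user_ids == ["@alice:example.com"]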
@@ -27,9 +27,9 @@ from synapse.http.servlet import (
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag
from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken

from ._base import client_patterns, interactive_auth_handler
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer

@@ -156,6 +156,7 @@ class KeyQueryServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()

@cancellable
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()

@@ -199,6 +200,7 @@ class KeyChangesServlet(RestServlet):
self.device_handler = hs.get_device_handler()
self.store = hs.get_datastores().main

@cancellable
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)

@@ -25,8 +25,8 @@ class AuthenticationData(RequestBodyModel):

(The name "Authentication Data" is taken directly from the spec.)

Additional keys will be present, depending on the `type` field. Use `.dict()` to
access them.
Additional keys will be present, depending on the `type` field. Use
`.dict(exclude_unset=True)` to access them.
"""

class Config:

@@ -36,7 +36,7 @@ class AuthenticationData(RequestBodyModel):
type: Optional[StrictStr] = None


class EmailRequestTokenBody(RequestBodyModel):
class ThreePidRequestTokenBody(RequestBodyModel):
if TYPE_CHECKING:
client_secret: StrictStr
else:

@@ -47,7 +47,7 @@ class EmailRequestTokenBody(RequestBodyModel):
max_length=255,
strict=True,
)
email: StrictStr

id_server: Optional[StrictStr]
id_access_token: Optional[StrictStr]
next_link: Optional[StrictStr]

@@ -61,9 +61,25 @@ class EmailRequestTokenBody(RequestBodyModel):
raise ValueError("id_access_token is required if an id_server is supplied.")
return token


class EmailRequestTokenBody(ThreePidRequestTokenBody):
email: StrictStr

# Canonicalise the email address. The addresses are all stored canonicalised
# in the database. This allows the user to reset his password without having to
# know the exact spelling (eg. upper and lower case) of address in the database.
# Without this, an email stored in the database as "foo@bar.com" would cause
# user requests for "FOO@bar.com" to raise a Not Found error.
_email_validator = validator("email", allow_reuse=True)(validate_email)


if TYPE_CHECKING:
ISO3116_1_Alpha_2 = StrictStr
else:
# Per spec: two-letter uppercase ISO-3166-1-alpha-2
ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)


class MsisdnRequestTokenBody(ThreePidRequestTokenBody):
country: ISO3116_1_Alpha_2
phone_number: StrictStr

@@ -341,7 +341,17 @@ class HomeServer(metaclass=abc.ABCMeta):
return domain_specific_string.domain == self.hostname

def is_mine_id(self, string: str) -> bool:
return string.split(":", 1)[1] == self.hostname
"""Determines whether a user ID or room alias originates from this homeserver.

Returns:
`True` if the hostname part of the user ID or room alias matches this
homeserver.
`False` otherwise, or if the user ID or room alias is malformed.
"""
localpart_hostname = string.split(":", 1)
if len(localpart_hostname) < 2:
return False
return localpart_hostname[1] == self.hostname

@cache_in_self
def get_clock(self) -> Clock:
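Note: a hedged illustration of the new is_mine_id() behaviour, assuming a HomeServer instance hs with hs.hostname == "example.com" (the identifiers are illustrative):

assert hs.is_mine_id("@alice:example.com") is True
assert hs.is_mine_id("@alice:elsewhere.org") is False
# A malformed ID with no colon now returns False instead of raising IndexError.
assert hs.is_mine_id("not-a-valid-id") is False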
@@ -36,6 +36,7 @@ from synapse.storage.util.partial_state_events_tracker import (
PartialStateEventsTracker,
)
from synapse.types import MutableStateMap, StateMap
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer

@@ -229,6 +230,7 @@ class StateStorageController:

@trace
@tag_args
@cancellable
async def get_state_ids_for_events(
self,
event_ids: Collection[str],

@@ -350,6 +352,7 @@ class StateStorageController:

@trace
@tag_args
@cancellable
async def get_state_group_for_events(
self,
event_ids: Collection[str],

@@ -398,6 +401,7 @@ class StateStorageController:
event_id, room_id, prev_group, delta_ids, current_state_ids
)

@cancellable
async def get_current_state_ids(
self,
room_id: str,

@@ -533,15 +533,14 @@ class DatabasePool:
if isinstance(self.engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search")

if self.engine.can_native_upsert:
# Check ASAP (and then later, every 1s) to see if we have finished
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert,
)
# Check ASAP (and then later, every 1s) to see if we have finished
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert,
)

def name(self) -> str:
"Return the name of this database"

@@ -1160,11 +1159,8 @@ class DatabasePool:
attempts = 0
while True:
try:
# We can autocommit if we are going to use native upserts
autocommit = (
self.engine.can_native_upsert
and table not in self._unsafe_to_upsert_tables
)
# We can autocommit if it is safe to upsert
autocommit = table not in self._unsafe_to_upsert_tables

return await self.runInteraction(
desc,

@@ -1199,7 +1195,7 @@ class DatabasePool:
) -> bool:
"""
Pick the UPSERT method which works best on the platform. Either the
native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
native one (Pg9.5+, SQLite >= 3.24), or fall back to an emulated method.

Args:
txn: The transaction to use.

@@ -1207,14 +1203,15 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
lock: True to lock the table when doing the upsert.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
not empty then this always returns True)
"""
insertion_values = insertion_values or {}

if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_txn_native_upsert(
txn, table, keyvalues, values, insertion_values=insertion_values
)

@@ -1365,14 +1362,12 @@ class DatabasePool:
value_names: The value column names
value_values: A list of each row's value column values.
Ignored if value_names is empty.
lock: True to lock the table when doing the upsert. Unused if the database engine
supports native upserts.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
"""

# We can autocommit if we are going to use native upserts
autocommit = (
self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables
)
# We can autocommit if it safe to upsert
autocommit = table not in self._unsafe_to_upsert_tables

await self.runInteraction(
desc,

@@ -1406,10 +1401,10 @@ class DatabasePool:
value_names: The value column names
value_values: A list of each row's value column values.
Ignored if value_names is empty.
lock: True to lock the table when doing the upsert. Unused if the database engine
supports native upserts.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
"""
if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_many_txn_native_upsert(
txn, table, key_names, key_values, value_names, value_values
)
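Note: the simplified upsert paths above assume every supported database engine can do native upserts; for SQLite that means 3.24 or newer, which introduced ON CONFLICT ... DO UPDATE. A minimal sketch of that version check using only the standard library:

import sqlite3

# SQLite gained native upserts (ON CONFLICT ... DO UPDATE) in 3.24.0.
assert sqlite3.sqlite_version_info >= (3, 24, 0), "native upserts need SQLite 3.24+"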
@@ -53,6 +53,7 @@ from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr

@@ -668,6 +669,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
...

@trace
@cancellable
async def get_user_devices_from_cache(
self, query_list: List[Tuple[str, Optional[str]]]
) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:

@@ -743,6 +745,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):

return self._device_list_stream_cache.get_all_entities_changed(from_key)

@cancellable
async def get_users_whose_devices_changed(
self,
from_key: int,

@@ -1221,6 +1224,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
desc="get_min_device_lists_changes_in_room",
)

@cancellable
async def get_device_list_changes_in_rooms(
self, room_ids: Collection[str], from_id: int
) -> Optional[Set[str]]:

@@ -50,6 +50,7 @@ from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:

@@ -135,6 +136,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
return now_stream_id, []

@trace
@cancellable
async def get_e2e_device_keys_for_cs_api(
self, query_list: List[Tuple[str, Optional[str]]]
) -> Dict[str, Dict[str, JsonDict]]:

@@ -197,6 +199,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
...

@trace
@cancellable
async def get_e2e_device_keys_and_signatures(
self,
query_list: Collection[Tuple[str, Optional[str]]],

@@ -887,6 +890,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker

return keys

@cancellable
async def get_e2e_cross_signing_keys_bulk(
self, user_ids: List[str], from_user_id: Optional[str] = None
) -> Dict[str, Optional[Dict[str, JsonDict]]]:

@@ -902,7 +906,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
keys were not found, either their user ID will not be in the dict,
or their user ID will map to None.
"""

result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)

if from_user_id:

@@ -48,6 +48,7 @@ from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:

@@ -976,6 +977,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas

return int(min_depth) if min_depth is not None else None

@cancellable
async def get_forward_extremities_for_room_at_stream_ordering(
self, room_id: str, stream_ordering: int
) -> List[str]:

@@ -1606,7 +1608,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
logger.info("Invalid prev_events for %s", event_id)
continue

if room_version.event_format == EventFormatVersions.V1:
if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
for prev_event_tuple in prev_events:
if (
not isinstance(prev_event_tuple, list)

@@ -81,6 +81,7 @@ from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import AsyncLruCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure

@@ -339,6 +340,7 @@ class EventsWorkerStore(SQLBaseStore):
) -> Optional[EventBase]:
...

@cancellable
async def get_event(
self,
event_id: str,

@@ -433,6 +435,7 @@ class EventsWorkerStore(SQLBaseStore):

@trace
@tag_args
@cancellable
async def get_events_as_list(
self,
event_ids: Collection[str],

@@ -584,6 +587,7 @@ class EventsWorkerStore(SQLBaseStore):

return events

@cancellable
async def _get_events_from_cache_or_db(
self, event_ids: Iterable[str], allow_rejected: bool = False
) -> Dict[str, EventCacheEntry]:

@@ -1156,7 +1160,7 @@ class EventsWorkerStore(SQLBaseStore):
if format_version is None:
# This means that we stored the event before we had the concept
# of a event format version, so it must be a V1 event.
format_version = EventFormatVersions.V1
format_version = EventFormatVersions.ROOM_V1_V2

room_version_id = row.room_version_id

@@ -1186,10 +1190,10 @@ class EventsWorkerStore(SQLBaseStore):
#
# So, the following approximations should be adequate.

if format_version == EventFormatVersions.V1:
if format_version == EventFormatVersions.ROOM_V1_V2:
# if it's event format v1 then it must be room v1 or v2
room_version = RoomVersions.V1
elif format_version == EventFormatVersions.V2:
elif format_version == EventFormatVersions.ROOM_V3:
# if it's event format v2 then it must be room v3
room_version = RoomVersions.V3
else:
@@ -129,91 +129,48 @@ class LockStore(SQLBaseStore):
now = self._clock.time_msec()
token = random_string(6)

if self.db_pool.engine.can_native_upsert:

def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
# We take out the lock if either a) there is no row for the lock
# already, b) the existing row has timed out, or c) the row is
# for this instance (which means the process got killed and
# restarted)
sql = """
INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT (lock_name, lock_key)
DO UPDATE
SET
token = EXCLUDED.token,
instance_name = EXCLUDED.instance_name,
last_renewed_ts = EXCLUDED.last_renewed_ts
WHERE
worker_locks.last_renewed_ts < ?
OR worker_locks.instance_name = EXCLUDED.instance_name
"""
txn.execute(
sql,
(
lock_name,
lock_key,
self._instance_name,
token,
now,
now - _LOCK_TIMEOUT_MS,
),
)

# We only acquired the lock if we inserted or updated the table.
return bool(txn.rowcount)

did_lock = await self.db_pool.runInteraction(
"try_acquire_lock",
_try_acquire_lock_txn,
# We can autocommit here as we're executing a single query, this
# will avoid serialization errors.
db_autocommit=True,
)
if not did_lock:
return None

else:
# If we're on an old SQLite we emulate the above logic by first
# clearing out any existing stale locks and then upserting.

def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool:
sql = """
DELETE FROM worker_locks
WHERE
lock_name = ?
AND lock_key = ?
AND (last_renewed_ts < ? OR instance_name = ?)
"""
txn.execute(
sql,
(lock_name, lock_key, now - _LOCK_TIMEOUT_MS, self._instance_name),
)

inserted = self.db_pool.simple_upsert_txn_emulated(
txn,
table="worker_locks",
keyvalues={
"lock_name": lock_name,
"lock_key": lock_key,
},
values={},
insertion_values={
"token": token,
"last_renewed_ts": self._clock.time_msec(),
"instance_name": self._instance_name,
},
)

return inserted

did_lock = await self.db_pool.runInteraction(
"try_acquire_lock_emulated", _try_acquire_lock_emulated_txn
def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
# We take out the lock if either a) there is no row for the lock
# already, b) the existing row has timed out, or c) the row is
# for this instance (which means the process got killed and
# restarted)
sql = """
INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT (lock_name, lock_key)
DO UPDATE
SET
token = EXCLUDED.token,
instance_name = EXCLUDED.instance_name,
last_renewed_ts = EXCLUDED.last_renewed_ts
WHERE
worker_locks.last_renewed_ts < ?
OR worker_locks.instance_name = EXCLUDED.instance_name
"""
txn.execute(
sql,
(
lock_name,
lock_key,
self._instance_name,
token,
now,
now - _LOCK_TIMEOUT_MS,
),
)

if not did_lock:
return None
# We only acquired the lock if we inserted or updated the table.
return bool(txn.rowcount)

did_lock = await self.db_pool.runInteraction(
"try_acquire_lock",
_try_acquire_lock_txn,
# We can autocommit here as we're executing a single query, this
# will avoid serialization errors.
db_autocommit=True,
)
if not did_lock:
return None

lock = Lock(
self._reactor,
@@ -812,7 +812,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
# FIXME: This shouldn't invalidate the whole cache
txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))

self.db_pool.simple_delete_txn(
self.db_pool.simple_upsert_txn(
txn,
table="receipts_graph",
keyvalues={

@@ -820,17 +820,13 @@ class ReceiptsWorkerStore(SQLBaseStore):
"receipt_type": receipt_type,
"user_id": user_id,
},
)
self.db_pool.simple_insert_txn(
txn,
table="receipts_graph",
values={
"room_id": room_id,
"receipt_type": receipt_type,
"user_id": user_id,
"event_ids": json_encoder.encode(event_ids),
"data": json_encoder.encode(data),
},
# receipts_graph has a unique constraint on
# (user_id, room_id, receipt_type), so no need to lock
lock=False,
)

@@ -175,6 +175,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"is_guest",
"admin",
"consent_version",
"consent_ts",
"consent_server_notice_sent",
"appservice_id",
"creation_ts",

@@ -2227,7 +2228,10 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
txn,
table="users",
keyvalues={"name": user_id},
updatevalues={"consent_version": consent_version},
updatevalues={
"consent_version": consent_version,
"consent_ts": self._clock.time_msec(),
},
)
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))

@@ -55,6 +55,7 @@ from synapse.types import JsonDict, PersistedEventPosition, StateMap, get_domain
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure

@@ -191,8 +192,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
(aka. with the lowest depth). This is done to match the sort in
`get_current_hosts_in_room()` and so we can re-use the cache but it's
not horrible to have here either.
"""

Uses `m.room.member`s in the room state at the current forward extremities to
determine which users are in the room.

Will return inaccurate results for rooms with partial state, since the state for
the forward extremities of those rooms will exclude most members. We may also
calculate room state incorrectly for such rooms and believe that a member is or
is not in the room when the opposite is true.
"""
return await self.db_pool.runInteraction(
"get_users_in_room", self.get_users_in_room_txn, room_id
)

@@ -770,6 +778,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
_get_users_server_still_shares_room_with_txn,
)

@cancellable
async def get_rooms_for_user(
self, user_id: str, on_invalidate: Optional[Callable[[], None]] = None
) -> FrozenSet[str]:

@@ -1020,6 +1029,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
longest is good because they're most likely to have anything we ask
about.

Uses `m.room.member`s in the room state at the current forward extremities to
determine which hosts are in the room.

Will return inaccurate results for rooms with partial state, since the state for
the forward extremities of those rooms will exclude most members. We may also
calculate room state incorrectly for such rooms and believe that a host is or
is not in the room when the opposite is true.

Returns:
Returns a list of servers sorted by longest in the room first. (aka.
sorted by join with the lowest depth first).

@@ -1042,6 +1059,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# We use a `Set` just for fast lookups
domain_set: Set[str] = set()
for u in users:
if ":" not in u:
continue
domain = get_domain_from_id(u)
if domain not in domain_set:
domain_set.add(domain)

@@ -1075,7 +1094,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
ORDER BY min(e.depth) ASC;
"""
txn.execute(sql, (room_id,))
return [d for d, in txn]
# `server_domain` will be `NULL` for malformed MXIDs with no colons.
return [d for d, in txn if d is not None]

return await self.db_pool.runInteraction(
"get_current_hosts_in_room", get_current_hosts_in_room_txn

@@ -23,6 +23,7 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,

@@ -36,6 +37,7 @@ from synapse.storage.state import StateFilter
from synapse.types import JsonDict, JsonMapping, StateMap
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:

@@ -142,6 +144,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

return room_version

@trace
async def get_metadata_for_events(
self, event_ids: Collection[str]
) -> Dict[str, EventMetadata]:

@@ -281,6 +284,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)

# FIXME: how should this be cached?
@cancellable
async def get_partial_filtered_current_state_ids(
self, room_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
@@ -446,59 +446,41 @@ class StatsStore(StateDeltasStore):
absolutes: Absolute (set) fields
additive_relatives: Fields that will be added onto if existing row present.
"""
if self.database_engine.can_native_upsert:
absolute_updates = [
"%(field)s = EXCLUDED.%(field)s" % {"field": field}
for field in absolutes.keys()
]
absolute_updates = [
"%(field)s = EXCLUDED.%(field)s" % {"field": field}
for field in absolutes.keys()
]

relative_updates = [
"%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field}
for field in additive_relatives.keys()
]
relative_updates = [
"%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field}
for field in additive_relatives.keys()
]

insert_cols = []
qargs = []
insert_cols = []
qargs = []

for (key, val) in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items()
):
insert_cols.append(key)
qargs.append(val)
for (key, val) in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items()
):
insert_cols.append(key)
qargs.append(val)

sql = """
INSERT INTO %(table)s (%(insert_cols_cs)s)
VALUES (%(insert_vals_qs)s)
ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
""" % {
"table": table,
"insert_cols_cs": ", ".join(insert_cols),
"insert_vals_qs": ", ".join(
["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
),
"key_columns": ", ".join(keyvalues),
"updates": ", ".join(chain(absolute_updates, relative_updates)),
}
sql = """
INSERT INTO %(table)s (%(insert_cols_cs)s)
VALUES (%(insert_vals_qs)s)
ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
""" % {
"table": table,
"insert_cols_cs": ", ".join(insert_cols),
"insert_vals_qs": ", ".join(
["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
),
"key_columns": ", ".join(keyvalues),
"updates": ", ".join(chain(absolute_updates, relative_updates)),
}

txn.execute(sql, qargs)
else:
self.database_engine.lock_table(txn, table)
retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
current_row = self.db_pool.simple_select_one_txn(
txn, table, keyvalues, retcols, allow_none=True
)
if current_row is None:
merged_dict = {**keyvalues, **absolutes, **additive_relatives}
self.db_pool.simple_insert_txn(txn, table, merged_dict)
else:
for (key, val) in additive_relatives.items():
if current_row[key] is None:
current_row[key] = val
else:
current_row[key] += val
current_row.update(absolutes)
self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)
txn.execute(sql, qargs)

async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None:
"""Calculate and insert an entry into room_stats_current.

@@ -72,6 +72,7 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer

@@ -597,6 +598,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

return ret, key

@cancellable
async def get_membership_changes_for_user(
self,
user_id: str,

@@ -221,25 +221,15 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_interval: how long until next retry in ms
"""

if self.database_engine.can_native_upsert:
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_native,
destination,
failure_ts,
retry_last_ts,
retry_interval,
db_autocommit=True,  # Safe as its a single upsert
)
else:
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_emulated,
destination,
failure_ts,
retry_last_ts,
retry_interval,
)
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_native,
destination,
failure_ts,
retry_last_ts,
retry_interval,
db_autocommit=True,  # Safe as it's a single upsert
)

def _set_destination_retry_timings_native(
self,

@@ -249,8 +239,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_last_ts: int,
retry_interval: int,
) -> None:
assert self.database_engine.can_native_upsert

# Upsert retry time interval if retry_interval is zero (i.e. we're
# resetting it) or greater than the existing retry interval.
#