Compare commits
42 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 73f21a1686 | |
| | da41a7cd61 | |
| | ebfeac7c5d | |
| | 4c4889cac0 | |
| | a911ffb42c | |
| | f694bb71b7 | |
| | 3d9f82efcb | |
| | c85c5ace52 | |
| | f2d2481e56 | |
| | 69fa29700e | |
| | 5261d2e2e8 | |
| | f799eac7ea | |
| | 906cead9ca | |
| | 89e8b98b65 | |
| | 8ef0c8ff14 | |
| | cf11919ddd | |
| | 526f84bc2e | |
| | 1cc729c177 | |
| | b7e4bfd005 | |
| | d4d3249ded | |
| | 8d7fcf9b76 | |
| | dc0e896b68 | |
| | c46fecd1f2 | |
| | 77f3986451 | |
| | b58386e37e | |
| | d3d9ca156e | |
| | c2fe48a6ff | |
| | bb5b47b62a | |
| | 26bc26586b | |
| | c9b7e97355 | |
| | 3d20115115 | |
| | a4ecb8e353 | |
| | b5effc7201 | |
| | b455c2a5ec | |
| | 32fc3b7ba4 | |
| | 8edf3f66d5 | |
| | c7b18d9d44 | |
| | 8cb9261598 | |
| | 898fef2789 | |
| | ad7fc8e92f | |
| | 877bdfa889 | |
| | 36b184b782 | |
.ci/scripts/calculate_jobs.py (new file, executable, +128)
@@ -0,0 +1,128 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Calculate the trial jobs to run based on if we're in a PR or not.

import json
import os

IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs

trial_sqlite_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "all",
    }
]

if not IS_PR:
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10")
    )


trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "10",
        "extras": "all",
    }
]

if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.10",
            "database": "postgres",
            "postgres-version": "14",
            "extras": "all",
        }
    )

trial_no_extra_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "",
    }
]

print("::group::Calculated trial jobs")
print(
    json.dumps(
        trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
    )
)
print("::endgroup::")

test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
print(f"::set-output name=trial_test_matrix::{test_matrix}")


# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs


sytest_tests = [
    {
        "sytest-tag": "focal",
    },
    {
        "sytest-tag": "focal",
        "postgres": "postgres",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
            },
            {
                "sytest-tag": "buster",
                "postgres": "multi-postgres",
                "workers": "workers",
            },
        ]
    )


print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
print(f"::set-output name=sytest_test_matrix::{test_matrix}")
.ci/scripts/prepare_old_deps.sh (previously .ci/scripts/test_old_deps.sh)
@@ -5,18 +5,8 @@
 # - creates a venv with these old versions using poetry; and finally
 # - invokes `trial` to run the tests with old deps.
 
-# Prevent tzdata from asking for user input
-export DEBIAN_FRONTEND=noninteractive
-
 set -ex
 
-apt-get update
-apt-get install -y \
-        python3 python3-dev python3-pip python3-venv pipx \
-        libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
-
-export LANG="C.UTF-8"
-
 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1
@@ -33,12 +23,6 @@ export VIRTUALENV_NO_DOWNLOAD=1
 # a `cryptography` compiled against OpenSSL 1.1.
 # - Omit systemd: we're not logging to journal here.
 
-# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
-# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
-# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
-# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
-# runs on 3.8 (#12343).
-
 sed -i \
    -e "s/[~>]=/==/g" \
+   -e '/^python = "^/!s/\^/==/g' \
@@ -55,7 +39,7 @@ sed -i \
 # toml file. This means we don't have to ensure compatibility between old deps and
 # dev tools.
 
-pip install --user toml
+pip install toml wheel
 
 REMOVE_DEV_DEPENDENCIES="
 import toml
@@ -69,8 +53,8 @@ with open('pyproject.toml', 'w') as f:
 "
 python3 -c "$REMOVE_DEV_DEPENDENCIES"
 
-pipx install poetry==1.1.14
-~/.local/bin/poetry lock
+pip install poetry==1.2.0
+poetry lock
 
 echo "::group::Patched pyproject.toml"
 cat pyproject.toml
@@ -78,6 +62,3 @@ echo "::endgroup::"
 echo "::group::Lockfile after patch"
 cat poetry.lock
 echo "::endgroup::"
-
-~/.local/bin/poetry install -E "all test"
-~/.local/bin/poetry run trial --jobs=2 tests
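The two `sed` expressions above turn Poetry's flexible bounds into exact pins: `~=`/`>=` become `==` everywhere, and carets become `==` on every line except the `python = "^3.7"` requirement (pinning that would break olddeps, which runs on Python 3.8; see #12343). A rough Python equivalent of the rewrite, for illustration only:

```python
import re

def pin_line(line: str) -> str:
    """Rewrite one pyproject.toml dependency line to an exact pin."""
    line = re.sub(r"[~>]=", "==", line)        # ~= and >= become ==
    if not line.startswith('python = "^'):     # keep the caret on the python bound...
        line = line.replace("^", "==")         # ...but pin every other caret bound
    return line

assert pin_line('attrs = ">=19.2.0"') == 'attrs = "==19.2.0"'
assert pin_line('frozendict = "^2.1.2"') == 'frozendict = "==2.1.2"'
assert pin_line('python = "^3.7"') == 'python = "^3.7"'  # left alone
```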
.dockerignore
@@ -4,8 +4,12 @@
 # things to include
 !docker
 !synapse
+!rust
 !README.rst
 !pyproject.toml
 !poetry.lock
+!build_rust.py
+
+rust/target
 
 **/__pycache__
.github/workflows/latest_deps.yml
@@ -5,7 +5,7 @@
 #
 # As an overview this workflow:
 # - checks out develop,
 # - installs from source, pulling in the dependencies like a fresh `pip install` would, and
 # - runs mypy and test suites in that checkout.
 #
 # Based on the twisted trunk CI job.
@@ -26,12 +26,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       # The dev dependencies aren't exposed in the wheel metadata (at least with current
       # poetry-core versions), so we install with poetry.
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: "3.x"
-          poetry-version: "1.2.0b1"
+          poetry-version: "1.2.0"
           extras: "all"
       # Dump installed versions for debugging.
       - run: poetry run pip list > before.txt
@@ -53,6 +60,14 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.postgres-version }}
         if: ${{ matrix.postgres-version }}
@@ -69,6 +84,12 @@ jobs:
         if: ${{ matrix.postgres-version }}
         timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
+
+      # We nuke the local copy, as we've installed synapse into the virtualenv
+      # (rather than use an editable install, which we no longer support). If we
+      # don't do this then python can't find the native lib.
+      - run: rm -rf synapse/
+
       - run: python -m twisted.trial --jobs=2 tests
         env:
           SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -113,6 +134,14 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - name: Ensure sytest runs `pip install`
         # Delete the lockfile so sytest will `pip install` rather than `poetry install`
         run: rm /src/poetry.lock
@@ -187,4 +216,3 @@ jobs:
         with:
           update_existing: true
           filename: .ci/latest_deps_build_failed_issue_template.md
-
.github/workflows/release-artifacts.yml
@@ -15,7 +15,7 @@ on:
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
 permissions:
   contents: write
 
@@ -89,9 +89,67 @@ jobs:
           name: debs
           path: debs/*
 
+  build-wheels:
+    name: Build wheels on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-20.04, macos-10.15]
+        is_pr:
+          - ${{ startsWith(github.ref, 'refs/pull/') }}
+
+        exclude:
+          # Don't build macos wheels on PR CI.
+          - is_pr: true
+            os: "macos-10.15"
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - uses: actions/setup-python@v3
+
+      - name: Install cibuildwheel
+        run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
+
+      # Only build a single wheel in CI.
+      - name: Set env vars.
+        run: |
+          echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
+        if: startsWith(github.ref, 'refs/pull/')
+
+      - name: Build wheels
+        run: python -m cibuildwheel --output-dir wheelhouse
+        env:
+          # Skip testing for platforms which various libraries don't have wheels
+          # for, and so need extra build deps.
+          CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: Wheel
+          path: ./wheelhouse/*.whl
+
   build-sdist:
-    name: "Build pypi distribution files"
-    uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
+    name: Build sdist
+    runs-on: ubuntu-latest
+    if: ${{ !startsWith(github.ref, 'refs/pull/') }}
+
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+
+      - run: pip install build
+
+      - name: Build sdist
+        run: python -m build --sdist
+
+      - uses: actions/upload-artifact@v2
+        with:
+          name: Sdist
+          path: dist/*.tar.gz
+
 
   # if it's a tag, create a release and attach the artifacts to it
   attach-assets:
@@ -99,6 +157,7 @@ jobs:
     if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
     needs:
       - build-debs
+      - build-wheels
       - build-sdist
     runs-on: ubuntu-latest
     steps:
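The `is_pr` entry in the `build-wheels` matrix above is a one-element dimension computed from the ref; combined with `exclude`, it drops the macOS build from PR runs while keeping it for pushes and tags. A small simulation of that expansion logic (illustrative only, not GitHub's implementation):

```python
from itertools import product

matrix = {"os": ["ubuntu-20.04", "macos-10.15"], "is_pr": [True]}  # a PR build
exclude = [{"is_pr": True, "os": "macos-10.15"}]

# Expand all dimension combinations, then drop any job matching an exclude rule.
jobs = [dict(zip(matrix, combo)) for combo in product(*matrix.values())]
jobs = [
    job
    for job in jobs
    if not any(all(job[k] == v for k, v in rule.items()) for rule in exclude)
]
print(jobs)  # [{'os': 'ubuntu-20.04', 'is_pr': True}]
```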
.github/workflows/tests.yml (+175 −75)
@@ -10,6 +10,23 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  # Job to detect what has changed so we don't run e.g. Rust checks on PRs that
+  # don't modify Rust code.
+  changes:
+    runs-on: ubuntu-latest
+    outputs:
+      rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
+    steps:
+      - uses: dorny/paths-filter@v2
+        id: filter
+        # We only check on PRs
+        if: startsWith(github.ref, 'refs/pull/')
+        with:
+          filters: |
+            rust:
+              - 'rust/**'
+              - 'Cargo.toml'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     steps:
@@ -65,71 +82,100 @@
           extras: "all"
       - run: poetry run scripts-dev/check_pydantic_models.py
 
+  lint-clippy:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.rust == 'true' }}
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+          components: clippy
+      - uses: Swatinem/rust-cache@v2
+
+      - run: cargo clippy
+
+  lint-rustfmt:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.rust == 'true' }}
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+          components: rustfmt
+      - uses: Swatinem/rust-cache@v2
+
+      - run: cargo fmt --check
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
-    needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta]
+    needs:
+      - lint
+      - lint-crlf
+      - lint-newsfile
+      - lint-pydantic
+      - check-sampleconfig
+      - check-schema-delta
+      - lint-clippy
+      - lint-rustfmt
     runs-on: ubuntu-latest
     steps:
       - run: "true"
 
-  trial:
+  calculate-test-jobs:
     if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - id: get-matrix
+        run: .ci/scripts/calculate_jobs.py
+    outputs:
+      trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
+      sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
+
+  trial:
+    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
+    needs: calculate-test-jobs
+    runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
-        database: ["sqlite"]
-        extras: ["all"]
-        is_pr:
-          - ${{ startsWith(github.ref, 'refs/pull/') }}
-
-        # If we're a PR then we only test min and max python.
-        exclude:
-          - is_pr: true
-            python-version: 3.8
-          - is_pr: true
-            python-version: 3.9
-
-        include:
-          # Newest Python without optional deps
-          - python-version: "3.10"
-            extras: ""
-
-          # Oldest Python with PostgreSQL
-          - python-version: "3.7"
-            database: "postgres"
-            postgres-version: "10"
-            extras: "all"
-
-          # Newest Python with newest PostgreSQL
-          - python-version: "3.10"
-            database: "postgres"
-            postgres-version: "14"
-            extras: "all"
+        job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
 
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
-        if: ${{ matrix.postgres-version }}
+      - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
+        if: ${{ matrix.job.postgres-version }}
         run: |
           docker run -d -p 5432:5432 \
             -e POSTGRES_PASSWORD=postgres \
             -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
-            postgres:${{ matrix.postgres-version }}
+            postgres:${{ matrix.job.postgres-version }}
       - uses: matrix-org/setup-python-poetry@v1
         with:
-          python-version: ${{ matrix.python-version }}
-          extras: ${{ matrix.extras }}
+          python-version: ${{ matrix.job.python-version }}
+          extras: ${{ matrix.job.extras }}
       - name: Await PostgreSQL
-        if: ${{ matrix.postgres-version }}
+        if: ${{ matrix.job.postgres-version }}
        timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
       - run: poetry run trial --jobs=2 tests
         env:
-          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
+          SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
           SYNAPSE_POSTGRES_HOST: localhost
           SYNAPSE_POSTGRES_USER: postgres
           SYNAPSE_POSTGRES_PASSWORD: postgres
@@ -150,16 +196,54 @@
     # Note: sqlite only; no postgres
     if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v2
-      - name: Test with old deps
-        uses: docker://ubuntu:focal # For old python and sqlite
-        # Note: focal seems to be using 3.8, but the oldest is 3.7?
-        # See https://github.com/matrix-org/synapse/issues/12343
-        with:
-          workdir: /github/workspace
-          entrypoint: .ci/scripts/test_old_deps.sh
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
+      # There aren't wheels for some of the older deps, so we need to install
+      # their build dependencies
+      - run: |
+          sudo apt-get -qq install build-essential libffi-dev python-dev \
+            libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.7'
+
+      # Calculating the old-deps actually takes a bunch of time, so we cache the
+      # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
+      # otherwise the `poetry install` step will error due to the poetry.lock
+      # file being outdated.
+      #
+      # This caches the output of `Prepare old deps`, which should generate the
+      # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
+      - uses: actions/cache@v3
+        id: cache-poetry-old-deps
+        name: Cache poetry.lock
+        with:
+          path: |
+            poetry.lock
+            pyproject.toml
+          key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
+      - name: Prepare old deps
+        if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
+        run: .ci/scripts/prepare_old_deps.sh
+
+      # We only now install poetry so that `setup-python-poetry` caches the
+      # right poetry.lock's dependencies.
+      - uses: matrix-org/setup-python-poetry@v1
+        with:
+          python-version: '3.7'
+          extras: "all test"
+
+      - run: poetry run trial -j2 tests
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}
@@ -208,50 +292,37 @@
   sytest:
     if: ${{ !failure() && !cancelled() }}
-    needs: linting-done
+    needs: calculate-test-jobs
     runs-on: ubuntu-latest
     container:
-      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
+      image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
       volumes:
         - ${{ github.workspace }}:/src
       env:
         SYTEST_BRANCH: ${{ github.head_ref }}
-        POSTGRES: ${{ matrix.postgres && 1}}
-        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
-        WORKERS: ${{ matrix.workers && 1 }}
-        REDIS: ${{ matrix.redis && 1 }}
-        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+        POSTGRES: ${{ matrix.job.postgres && 1}}
+        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
+        WORKERS: ${{ matrix.job.workers && 1 }}
+        BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
         TOP: ${{ github.workspace }}
 
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - sytest-tag: focal
-
-          - sytest-tag: focal
-            postgres: postgres
-
-          - sytest-tag: testing
-            postgres: postgres
-
-          - sytest-tag: focal
-            postgres: multi-postgres
-            workers: workers
-
-          - sytest-tag: buster
-            postgres: multi-postgres
-            workers: workers
-
-          - sytest-tag: buster
-            postgres: postgres
-            workers: workers
-            redis: redis
+        job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
 
     steps:
       - uses: actions/checkout@v2
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
@@ -262,7 +333,7 @@
         uses: actions/upload-artifact@v2
         if: ${{ always() }}
         with:
-          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
           path: |
             /logs/results.tap
             /logs/**/*.log*
@@ -358,6 +429,13 @@
       with:
         path: synapse
 
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh
 
@@ -367,6 +445,25 @@
         shell: bash
         name: Run Complement Tests
 
+  cargo-test:
+    if: ${{ needs.changes.outputs.rust == 'true' }}
+    runs-on: ubuntu-latest
+    needs:
+      - linting-done
+      - changes
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.61.0
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
+      - run: cargo test
+
   # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
   tests-done:
     if: ${{ always() }}
@@ -381,6 +478,7 @@
       - export-data
       - portdb
       - complement
+      - cargo-test
     runs-on: ubuntu-latest
     steps:
       - uses: matrix-org/done-action@v2
@@ -388,5 +486,7 @@
         needs: ${{ toJSON(needs) }}
 
         # The newsfile lint may be skipped on non PR builds
-        skippable:
+        # Cargo test is skipped if there is no changes on Rust code
+        skippable: |
           lint-newsfile
+          cargo-test
.github/workflows/twisted_trunk.yml
@@ -16,6 +16,14 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: "3.x"
@@ -34,6 +42,14 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: "3.x"
@@ -66,6 +82,14 @@ jobs:
 
     steps:
      - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - name: Patch dependencies
         # Note: The poetry commands want to create a virtualenv in /src/.venv/,
         # but the sytest-synapse container expects it to be in /venv/.
.gitignore
@@ -60,3 +60,10 @@ book/
 
 # complement
 /complement-*
 /master.tar.gz
+
+# rust
+/target/
+/synapse/*.so
+
+# Poetry will create a setup.py, which we don't want to include.
+/setup.py
Cargo.toml (new file, +5)
@@ -0,0 +1,5 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).

[workspace]
members = ["rust"]
README.rst (+1 −1)
@@ -3,7 +3,7 @@ Synapse |support| |development| |documentation| |license| |pypi| |python|
 =========================================================================
 
 Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
-maintained by the Matrix.org Foundation. We began rapid development began in 2014,
+maintained by the Matrix.org Foundation. We began rapid development in 2014,
 reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
 in earnest today.
build_rust.py (new file, +20)
@@ -0,0 +1,20 @@
# A build script for poetry that adds the rust extension.

import os
from typing import Any, Dict

from setuptools_rust import Binding, RustExtension


def build(setup_kwargs: Dict[str, Any]) -> None:
    original_project_dir = os.path.dirname(os.path.realpath(__file__))
    cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")

    extension = RustExtension(
        target="synapse.synapse_rust",
        path=cargo_toml_path,
        binding=Binding.PyO3,
        py_limited_api=True,
    )
    setup_kwargs.setdefault("rust_extensions", []).append(extension)
    setup_kwargs["zip_safe"] = False
changelog.d/* (new files, one line each)
@@ -0,0 +1 @@
Add a stub Rust crate.
@@ -0,0 +1 @@
Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process.
@@ -0,0 +1 @@
Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).
@@ -0,0 +1 @@
Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis.
@@ -0,0 +1 @@
Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats.
@@ -0,0 +1 @@
Add admin APIs to fetch messages within a particular window of time.
@@ -0,0 +1 @@
Update docs to make enabling metrics more clear.
@@ -0,0 +1 @@
Cancel the processing of key query requests when they time out.
@@ -0,0 +1 @@
Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).
@@ -0,0 +1 @@
Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas.
@@ -0,0 +1 @@
Rename the `EventFormatVersions` enum values so that they line up with room version numbers.
@@ -0,0 +1 @@
Update trial old deps CI to use poetry 1.2.0.
@@ -0,0 +1 @@
Fix typechecking with latest types-jsonschema.
@@ -0,0 +1 @@
Reduce number of CI checks we run for PRs.
@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -0,0 +1 @@
Fix typechecking with latest types-jsonschema.
@@ -0,0 +1 @@
Update trial old deps CI to use poetry 1.2.0.
@@ -0,0 +1 @@
Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.
@@ -0,0 +1 @@
Define Synapse's compatibility policy for SQLite versions.
@@ -0,0 +1 @@
Strip number suffix from instance name to consolidate services that traces are spread over.
@@ -0,0 +1 @@
Instrument `get_metadata_for_events` for understandable traces in Jaeger.
@@ -0,0 +1 @@
Add a stub Rust crate.
@@ -0,0 +1 @@
Add a stub Rust crate.
@@ -0,0 +1 @@
Fix a bug where Synapse fails to start if a signing key file contains an empty line.
@@ -0,0 +1 @@
Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.
@@ -0,0 +1 @@
Add a stub Rust crate.
@@ -0,0 +1 @@
Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar).
@@ -0,0 +1 @@
Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.
@@ -0,0 +1 @@
Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.
@@ -0,0 +1 @@
Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.
@@ -0,0 +1 @@
Use an additional database query when persisting receipts.
@@ -0,0 +1 @@
Re-type hint some collections as read-only.
@@ -0,0 +1 @@
Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.
@@ -0,0 +1 @@
Add a check for editable installs if the Rust library needs rebuilding.
@@ -0,0 +1 @@
Synapse will now refuse to start if configured to use SQLite < 3.27.
@@ -0,0 +1 @@
Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance.
@@ -0,0 +1 @@
Add a stub Rust crate.
@@ -0,0 +1 @@
Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar).
contrib/grafana/synapse.json
@@ -335,7 +335,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))",
       "hide": false,
       "instant": false,
       "legendFormat": "Events",
@@ -1423,7 +1423,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "hide": false,
       "instant": false,
@@ -1804,7 +1804,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
+      "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -2437,7 +2437,7 @@
         "uid": "$datasource"
       },
       "exemplar": false,
-      "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds{instance=\"$instance\"}[1m]))",
+      "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
       "format": "time_series",
       "hide": false,
       "instant": false,
@@ -2451,7 +2451,7 @@
         "uid": "$datasource"
       },
       "exemplar": false,
-      "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds{instance=\"$instance\"}[1m]))",
+      "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
       "format": "time_series",
       "hide": false,
       "instant": false,
@@ -3425,7 +3425,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 1,
@@ -3518,7 +3518,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -3726,7 +3726,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "successful txn rate",
@@ -3736,7 +3736,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
       "legendFormat": "failed txn rate",
       "refId": "B"
     }
@@ -3826,7 +3826,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "pdus",
@@ -3836,7 +3836,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "edus",
@@ -3928,7 +3928,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 1,
@@ -3939,7 +3939,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "edus",
@@ -5042,7 +5042,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
+      "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -5054,7 +5054,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
+      "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
       "format": "time_series",
       "intervalFactor": 2,
       "legendFormat": "failed {{job}}",
@@ -5268,12 +5268,12 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
       "legendFormat": "{{index}}",
-      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter",
+      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total",
       "refId": "A",
       "step": 2
     }
@@ -5369,12 +5369,12 @@
         "uid": "$datasource"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
       "legendFormat": "{{index}}",
-      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
+      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
       "refId": "A",
       "step": 2
     }
@@ -5475,12 +5475,12 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
       "legendFormat": "Hit Rate",
-      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
+      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
       "refId": "A",
       "step": 2
     },
@@ -5490,7 +5490,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -5598,12 +5598,12 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
       "legendFormat": "Hit Rate",
-      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
+      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
       "refId": "A",
       "step": 2
     },
@@ -5613,7 +5613,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -5719,12 +5719,12 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
       "legendFormat": "Hit Rate",
-      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
+      "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
       "refId": "A",
       "step": 2
     },
@@ -5734,7 +5734,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "sum(rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6087,7 +6087,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+      "expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6187,7 +6187,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "instant": false,
       "interval": "",
@@ -6287,7 +6287,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "instant": false,
       "interval": "",
@@ -6538,7 +6538,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6636,7 +6636,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])",
+      "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6737,7 +6737,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "exemplar": true,
-      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6839,7 +6839,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -6936,7 +6936,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -7033,7 +7033,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -7122,7 +7122,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "interval": "",
       "legendFormat": "{{job}}-{{index}} {{block_name}}",
       "refId": "A"
@@ -7246,7 +7246,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+      "expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
       "format": "time_series",
       "intervalFactor": 2,
       "legendFormat": "{{name}} {{job}}-{{index}}",
@@ -7347,7 +7347,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+      "expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
       "format": "time_series",
       "hide": false,
       "interval": "",
@@ -7447,7 +7447,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+      "expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -7547,7 +7547,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+      "expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 2,
@@ -7643,7 +7643,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 1,
@@ -7763,7 +7763,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+      "expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
       "interval": "",
       "legendFormat": "{{name}} {{job}}-{{index}}",
       "refId": "A"
@@ -7853,7 +7853,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
+      "expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
       "interval": "",
       "legendFormat": "{{name}} {{job}}-{{index}}",
       "refId": "A"
@@ -9556,7 +9556,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+      "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
       "format": "heatmap",
       "intervalFactor": 1,
       "legendFormat": "{{le}}",
@@ -9716,7 +9716,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+      "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
       "format": "heatmap",
       "intervalFactor": 1,
       "legendFormat": "{{le}}",
@@ -9793,7 +9793,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "50%",
@@ -9803,7 +9803,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "75%",
@@ -9813,7 +9813,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "90%",
@@ -9823,7 +9823,7 @@
       "datasource": {
         "uid": "$datasource"
      },
-      "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "99%",
@@ -9905,7 +9905,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+      "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
       "format": "heatmap",
       "intervalFactor": 1,
       "legendFormat": "{{le}}",
@@ -9982,7 +9982,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "50%",
@@ -9992,7 +9992,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+      "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "75%",
@@ -10002,7 +10002,7 @@
       "datasource": {
         "uid": "$datasource"
       },
-      "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "90%",
|
||||
@@ -10012,7 +10012,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
|
||||
"expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "99%",
|
||||
@@ -10297,7 +10297,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "State res ",
|
||||
"refId": "A"
|
||||
@@ -10306,7 +10306,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "Potential to prune",
|
||||
"refId": "B"
|
||||
@@ -10315,7 +10315,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
|
||||
"interval": "",
|
||||
"legendFormat": "Pruned",
|
||||
"refId": "C"
|
||||
@@ -11069,7 +11069,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Notified",
|
||||
"refId": "A"
|
||||
@@ -11078,7 +11078,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Remote ping",
|
||||
"refId": "B"
|
||||
@@ -11087,7 +11087,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Total updates",
|
||||
"refId": "C"
|
||||
@@ -11096,7 +11096,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Remote updates",
|
||||
"refId": "D"
|
||||
@@ -11105,7 +11105,7 @@
|
||||
"datasource": {
|
||||
"uid": "$datasource"
|
||||
},
|
||||
"expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
|
||||
"interval": "",
|
||||
"legendFormat": "Bump active time",
|
||||
"refId": "E"
|
||||
@@ -11789,7 +11789,7 @@
|
||||
"name": "instance",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, instance)",
|
||||
"refId": "Prometheus-instance-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11818,7 +11818,7 @@
|
||||
"name": "job",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, job)",
|
||||
"refId": "Prometheus-job-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11848,7 +11848,7 @@
|
||||
"name": "index",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)",
|
||||
"query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, index)",
|
||||
"refId": "Prometheus-index-Variable-Query"
|
||||
},
|
||||
"refresh": 2,
|
||||
@@ -11896,6 +11896,6 @@
|
||||
"timezone": "",
|
||||
"title": "Synapse",
|
||||
"uid": "000000012",
|
||||
"version": 132,
|
||||
"version": 133,
|
||||
"weekStart": ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,55 +1,35 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  # These 3 rules are used in the included Prometheus console
  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
    expr: 'synapse_federation_client_sent_edus_total + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
    expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  # These 3 rules are used in the included Prometheus console
  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
    expr: 'synapse_federation_server_received_edus_total + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
    expr: 'synapse_federation_server_received_pdus_total + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  # These 2 rules are used in the included Prometheus console
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
@@ -59,20 +39,25 @@ groups:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'

  # These 3 rules are used in the included Grafana dashboard
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
    labels:
      type: remote
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
    labels:
      type: local
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
    labels:
      type: bridges
  - record: synapse_storage_events_persisted_by_event_type
    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
  - record: synapse_storage_events_persisted_by_origin
    expr: sum without(type) (synapse_storage_events_persisted_events_sep)

  # This rule is used in the included Grafana dashboard
  - record: synapse_storage_events_persisted_by_event_type
    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)

  # This rule is used in the included Grafana dashboard
  - record: synapse_storage_events_persisted_by_origin
    expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)


Vendored
+6
-1
@@ -61,7 +61,7 @@ dh_virtualenv \
    --extras="all,systemd,test" \
    --requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

@@ -78,9 +78,14 @@ case "$DEB_BUILD_OPTIONS" in

        cp -r tests "$tmpdir"

        # To avoid pulling in the unbuilt Synapse in the local directory
        pushd /

        PYTHONPATH="$tmpdir" \
            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

        popd

        ;;
esac


Vendored
+4
@@ -12,11 +12,15 @@ matrix-synapse-py3 (1.66.0) stable; urgency=medium

matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium

  [ Jörg Behrmann ]
  * Update debhelper to compatibility level 12.
  * Drop the preinst script stopping synapse.
  * Allocate a group for the system user.
  * Change dpkg-statoverride to --force-statoverride-add.

  [ Erik Johnston ]
  * Disable `dh_auto_configure` as it broke during Rust build.

 -- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100

matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium

Vendored
+2
@@ -12,6 +12,8 @@ override_dh_installsystemd:
# we don't really want to strip the symbols from our object files.
override_dh_strip:

override_dh_auto_configure:

# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal

+12
-2
@@ -92,11 +92,20 @@ RUN \
    libxml++2.6-dev \
    libxslt1-dev \
    openssl \
    rustc \
    zlib1g-dev \
    git \
    curl \
    && rm -rf /var/lib/apt/lists/*


# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable

# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -108,8 +117,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \

# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
COPY pyproject.toml README.rst build_rust.py /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

@@ -72,6 +72,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    && env DEBIAN_FRONTEND=noninteractive apt-get install \
    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
    build-essential \
    curl \
    debhelper \
    devscripts \
    libsystemd-dev \
@@ -85,6 +86,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    libpq-dev \
    xmlsec1

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable


COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /

# install dhvirtualenv. Update the apt cache again first, in case we got a

@@ -393,6 +393,151 @@ A response body like the following is returned:
}
```

# Room Messages API

The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).

This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to fetch messages from.

The following query parameters are available:

* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
this value to `b` will reverse the above sort order. Defaults to `f`.

**Response**

The following fields are possible in the JSON response body:

* `chunk` - A list of room events. The order depends on the dir parameter.
Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.

**Example**

For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

```json
{
  "chunk": [
    {
      "content": {
        "body": "This is an example text message",
        "format": "org.matrix.custom.html",
        "formatted_body": "<b>This is an example text message</b>",
        "msgtype": "m.text"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "name": "The room name"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "state_key": "",
      "type": "m.room.name",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "body": "Gangnam Style",
        "info": {
          "duration": 2140786,
          "h": 320,
          "mimetype": "video/mp4",
          "size": 1563685,
          "thumbnail_info": {
            "h": 300,
            "mimetype": "image/jpeg",
            "size": 46144,
            "w": 300
          },
          "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
          "w": 480
        },
        "msgtype": "m.video",
        "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    }
  ],
  "end": "t47409-4357353_219380_26003_2265",
  "start": "t47429-4392820_219380_26003_2265"
}
```
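
For illustration, here is a minimal Python sketch that pages through this endpoint using only the standard library. The homeserver URL, room ID, admin token and starting token below are placeholders, not values from this document:

```python
import json
import urllib.parse
import urllib.request

# Placeholders - substitute your own homeserver, room and admin access token.
BASE = "https://synapse.example.com"
ROOM_ID = "!636q39766251:example.com"
TOKEN = "<admin_access_token>"

def fetch_messages(from_token: str) -> dict:
    """Fetch one page of messages from the admin Room Messages API."""
    params = urllib.parse.urlencode({"from": from_token, "dir": "f", "limit": 10})
    url = (
        f"{BASE}/_synapse/admin/v1/rooms/"
        f"{urllib.parse.quote(ROOM_ID)}/messages?{params}"
    )
    req = urllib.request.Request(url, headers={"Authorization": f"Bearer {TOKEN}"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

# Keep paginating until the response no longer includes an `end` token.
token = "<token_from_sync_or_previous_request>"
while True:
    page = fetch_messages(token)
    for event in page["chunk"]:
        print(event["event_id"], event["type"])
    if "end" not in page:
        break
    token = page["end"]
```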

# Room Timestamp to Event API

The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).

This is useful for cases like jump to date, so you can start paginating
messages from a given date in the archive.

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to check.

The following query parameters are available:

* `ts` - the timestamp, in milliseconds, to find the closest event to, in
the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp. Defaults to `f`.

**Response**

* `event_id` - The ID of the event closest to the given timestamp.
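
A matching Python sketch (again with a placeholder homeserver, room and token):

```python
import json
import urllib.parse
import urllib.request

# Look up the event closest to the given millisecond timestamp, forwards in time.
url = (
    "https://synapse.example.com/_synapse/admin/v1/rooms/"
    + urllib.parse.quote("!636q39766251:example.com")
    + "/timestamp_to_event?ts=1432735824653&dir=f"
)
req = urllib.request.Request(url, headers={"Authorization": "Bearer <admin_access_token>"})
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["event_id"])
```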

# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

@@ -42,6 +42,7 @@ It returns a JSON body like the following:
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
"consent_ts": null,
"external_ids": [
{
"auth_provider": "<provider1>",
@@ -364,6 +365,7 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)

## Reset password


@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.
Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.


Policy
@@ -17,6 +17,14 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).

A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context
-------
@@ -31,3 +39,15 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.

For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bumps its minimum supported Rust versions frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.

On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.
@@ -28,6 +28,9 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).


# 3. Get the source.

@@ -114,6 +117,11 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

When changes are made to any Rust code, you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
|
||||
is quicker than `poetry install`, so is recommended when making frequent
|
||||
changes to the Rust code.
|
||||
|
||||
|
||||
# 8. Test, test, test!
|
||||
<a name="test-test-test"></a>
|
||||
@@ -195,7 +203,7 @@ The database file can then be inspected with:
|
||||
sqlite3 _trial_temp/test.db
|
||||
```
|
||||
|
||||
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.


@@ -191,3 +191,27 @@ There are three separate aspects to this:
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.


## `event_id` global uniqueness

In room versions `1` and `2` it's possible to end up with two events with the
same `event_id` (in the same or different rooms). After room version `3`, that
can only happen with a hash collision, which we basically hope will never
happen.

There are several places in Synapse and even Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.

But hash collisions are still possible, and by treating event IDs as room
scoped, we can reduce the possibility of a hash collision. When scoping
`event_id` in the database schema, it should also be accompanied by `room_id`
(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
`(room_id, event_id)`.
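
As a toy illustration of that scoping (using Python's `sqlite3` and a made-up table, not Synapse's actual schema):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
# Scope events by room: the primary key is the (room_id, event_id) pair.
conn.execute(
    """
    CREATE TABLE events (
        room_id TEXT NOT NULL,
        event_id TEXT NOT NULL,
        json TEXT NOT NULL,
        PRIMARY KEY (room_id, event_id)
    )
    """
)
conn.execute(
    "INSERT INTO events VALUES (?, ?, ?)",
    ("!room:example.org", "$event1", "{}"),
)
# Lookups go through the pair, never through the event_id alone.
row = conn.execute(
    "SELECT json FROM events WHERE room_id = ? AND event_id = ?",
    ("!room:example.org", "$event1"),
).fetchone()
print(row)
```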

There has been a lot of debate on this in places like
https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01).


@@ -7,7 +7,13 @@

1. Enable Synapse metrics:

There are two methods of enabling metrics in Synapse.
In `homeserver.yaml`, make sure `enable_metrics` is
set to `True`.

1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
collect data:

There are two methods of enabling the metrics endpoint in Synapse.

The first serves the metrics as a part of the usual web server and
can be enabled by adding the \"metrics\" resource to the existing
@@ -41,9 +47,6 @@
- '0.0.0.0'
```

For both options, you will need to ensure that `enable_metrics` is
set to `True`.

1. Restart Synapse.

1. Add a Prometheus target for Synapse.

@@ -196,6 +196,10 @@ System requirements:
- Python 3.7 or later, up to Python 3.10.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are
unavailable, you will need to have a recent Rust compiler installed. The easiest
way of installing the latest version is to use [rustup](https://rustup.rs/).

To install the Synapse homeserver run:

```sh

@@ -91,6 +91,21 @@ process, for example:

# Upgrading to v1.67.0

## Direct TCP replication is no longer supported: migrate to Redis

Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which were deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and is a prerequisite for upcoming
performance improvements.

To migrate to Redis add the [`redis` config](./workers.md#shared-configuration),
and remove the TCP `replication` listener from config of the master and
`worker_replication_port` from worker config. Note that a HTTP listener with a
`replication` resource is still required.

## Minimum version of Poetry is now v1.2.0

The minimum supported version of poetry is now 1.2. This should only affect

@@ -431,8 +431,6 @@ Sub-options for each listener include:

* `metrics`: (see the docs [here](../../metrics-howto.md)),

* `replication`: (deprecated as of Synapse 1.18, see the docs [here](../../workers.md)).

* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.

* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
@@ -1071,8 +1069,10 @@ Options related to caching.
---
### `event_cache_size`

The number of events to cache in memory. Not affected by
`caches.global_factor` and is not part of the `caches` section. Defaults to 10K.
The number of events to cache in memory. Defaults to 10K. Like other caches,
this is affected by `caches.global_factor` (see below).

Note that this option is not part of the `caches` section.

Example configuration:
```yaml

+5
-17
@@ -32,13 +32,8 @@ stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.

Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which is deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
cpu saving on the main process and will be a prerequisite for upcoming
performance improvements.
All the workers and the main process connect to Redis, which relays replication
commands between processes.

If Redis support is enabled Synapse will use it as a shared cache, as well as a
pub/sub mechanism.
@@ -330,7 +325,6 @@ effects of bursts of events from that bridge on events sent by normal users.

Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
(This is only supported with Redis-based replication.)

To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
@@ -600,15 +594,9 @@ equivalent to `synapse.app.generic_worker`:

## Migration from old config

There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible and so no changes to the config are required, however
server admins are encouraged to plan to migrate to Redis as the old style direct
TCP replication config is deprecated.

To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.
A main change that has occurred is the merging of worker apps into
`synapse.app.generic_worker`. This change is backwards compatible and so no
changes to the config are required.

To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where workers are started (e.g.

@@ -16,7 +16,8 @@ files =
docker/,
scripts-dev/,
synapse/,
tests/
tests/,
build_rust.py

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
@@ -181,3 +182,6 @@ ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

[mypy-setuptools_rust.*]
ignore_missing_imports = True

Generated
+34
-1
@@ -1035,6 +1035,18 @@ python-versions = ">=3.6"
cryptography = ">=2.0"
jeepney = ">=0.6"

[[package]]
name = "semantic-version"
version = "2.10.0"
description = "A library implementing the 'SemVer' scheme."
category = "main"
optional = false
python-versions = ">=2.7"

[package.extras]
dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
doc = ["Sphinx", "sphinx-rtd-theme"]

[[package]]
name = "sentry-sdk"
version = "1.5.11"
@@ -1099,6 +1111,19 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-g
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]

[[package]]
name = "setuptools-rust"
version = "1.5.1"
description = "Setuptools Rust extension plugin"
category = "main"
optional = false
python-versions = ">=3.7"

[package.dependencies]
semantic-version = ">=2.8.2,<3"
setuptools = ">=62.4"
typing-extensions = ">=3.7.4.3"

[[package]]
name = "signedjson"
version = "1.1.4"
@@ -1600,7 +1625,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d"
content-hash = "79cfa09d59f9f8b5ef24318fb860df1915f54328692aa56d04331ecbdd92a8cb"

[metadata.files]
attrs = [
@@ -2472,6 +2497,10 @@ secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
]
semantic-version = [
{file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
{file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
{file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
@@ -2484,6 +2513,10 @@ setuptools = [
{file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
]
setuptools-rust = [
{file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
{file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},

+39
-2
@@ -52,6 +52,9 @@ include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true

[tool.maturin]
manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.66.0"
@@ -82,7 +85,16 @@ include = [
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
{ path = "Cargo.toml", format = "sdist" },
{ path = "rust/Cargo.toml", format = "sdist" },
{ path = "rust/Cargo.lock", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
exclude = [
{ path = "synapse/*.so", format = "sdist"}
]

build = "build_rust.py"

[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
@@ -126,7 +138,7 @@ pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.0"
bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
@@ -161,6 +173,15 @@ importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic available on common distros.
pydantic = ">=1.7.4"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --no-dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
setuptools_rust = ">=1.3"


# Optional Dependencies
@@ -285,5 +306,21 @@ twine = "*"
towncrier = ">=18.6.0rc1"

[build-system]
requires = ["poetry-core>=1.0.0"]
requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"]
build-backend = "poetry.core.masonry.api"


[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }

# For some reason if we don't manually clean the build directory we
# can end up polluting the next build with a .so that is for the wrong
# Python version.
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"

@@ -0,0 +1,25 @@
[package]
# We name the package `synapse` so that things like logging have the right
# logging target.
name = "synapse"

# Dummy version. See pyproject.toml for Synapse's version number.
version = "0.1.0"

edition = "2021"
rust-version = "1.61.0"

[lib]
name = "synapse"
crate-type = ["cdylib"]

[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"

[dependencies]
pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }

[build-dependencies]
blake2 = "0.10.4"
hex = "0.4.3"
@@ -0,0 +1,45 @@
//! This build script calculates the hash of all files in the `src/`
//! directory and adds it as an environment variable during build time.
//!
//! This is used so that the python code can detect when the built native module
//! does not match the source in-tree, helping to detect the case where the
//! source has been updated but the library hasn't been rebuilt.

use std::path::PathBuf;

use blake2::{Blake2b512, Digest};

fn main() -> Result<(), std::io::Error> {
    let mut dirs = vec![PathBuf::from("src")];

    let mut paths = Vec::new();
    while let Some(path) = dirs.pop() {
        let mut entries = std::fs::read_dir(path)?
            .map(|res| res.map(|e| e.path()))
            .collect::<Result<Vec<_>, std::io::Error>>()?;

        entries.sort();

        for entry in entries {
            if entry.is_dir() {
                dirs.push(entry)
            } else {
                paths.push(entry.to_str().expect("valid rust paths").to_string());
            }
        }
    }

    paths.sort();

    let mut hasher = Blake2b512::new();

    for path in paths {
        let bytes = std::fs::read(path)?;
        hasher.update(bytes);
    }

    let hex_digest = hex::encode(hasher.finalize());
    println!("cargo:rustc-env=SYNAPSE_RUST_DIGEST={hex_digest}");

    Ok(())
}
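
The Python side can then compare this baked-in digest against a fresh hash of the checked-out `rust/src` tree (the real check lives in `synapse.util.rust`, imported below in `synapse/__init__.py`). As a rough, illustrative sketch only — not the actual implementation — the comparison could look like this; note the file ordering here only approximates the build script's sort:

```python
import hashlib
from pathlib import Path

def hash_rust_sources(src_dir: str = "rust/src") -> str:
    """Hash all files under rust/src in sorted order, mirroring build.rs."""
    hasher = hashlib.blake2b(digest_size=64)  # Blake2b-512, as in build.rs
    for path in sorted(Path(src_dir).rglob("*")):
        if path.is_file():
            hasher.update(path.read_bytes())
    return hasher.hexdigest()

def check_up_to_date() -> None:
    # The compiled module carries the digest of the sources it was built from.
    from synapse.synapse_rust import get_rust_file_digest

    if get_rust_file_digest() != hash_rust_sources():
        raise RuntimeError(
            "Rust module is stale: re-run `poetry install` or `maturin develop`"
        )
```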
@@ -0,0 +1,24 @@
use pyo3::prelude::*;

/// Returns the hash of all the rust source files at the time it was compiled.
///
/// Used by python to detect if the rust library is outdated.
#[pyfunction]
fn get_rust_file_digest() -> &'static str {
    env!("SYNAPSE_RUST_DIGEST")
}

/// Formats the sum of two numbers as string.
#[pyfunction]
#[pyo3(text_signature = "(a, b, /)")]
fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
    Ok((a + b).to_string())
}

/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
    m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
    Ok(())
}
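
Once the extension is built (e.g. with `maturin develop`), the functions registered above are importable from Python under the path configured in `[package.metadata.maturin]`; for example:

```python
from synapse.synapse_rust import get_rust_file_digest, sum_as_string

print(sum_as_string(1, 2))     # "3"
print(get_rust_file_digest())  # hex digest baked in at compile time
```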
+142
-4
@@ -18,10 +18,12 @@
"""

import glob
import json
import os
import re
import subprocess
import sys
import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
@@ -71,18 +73,21 @@ def cli() -> None:

    ./scripts-dev/release.py tag

    # ... wait for assets to build ...
    # wait for assets to build, either manually or with:
    ./scripts-dev/release.py wait-for-actions

    ./scripts-dev/release.py publish

    ./scripts-dev/release.py upload

    # Optional: generate some nice links for the announcement

    ./scripts-dev/release.py merge-back

    # Optional: generate some nice links for the announcement
    ./scripts-dev/release.py announce

    Alternatively, `./scripts-dev/release.py full` will do all the above
    as well as guiding you through the manual steps.

    If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
    `tag`/`publish` command, then a new draft release will be created/published.
    """
@@ -90,6 +95,10 @@ def cli() -> None:

@cli.command()
def prepare() -> None:
    _prepare()


def _prepare() -> None:
    """Do the initial stages of creating a release, including creating release
    branch, updating changelog and pushing to GitHub.
    """
@@ -284,6 +293,10 @@ def prepare() -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
def tag(gh_token: Optional[str]) -> None:
    _tag(gh_token)


def _tag(gh_token: Optional[str]) -> None:
    """Tags the release and generates a draft GitHub release"""

    # Make sure we're in a git repo.
@@ -374,6 +387,10 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
    _publish(gh_token)


def _publish(gh_token: str) -> None:
    """Publish release on GitHub."""

    # Make sure we're in a git repo.
@@ -411,6 +428,10 @@ def publish(gh_token: str) -> None:

@cli.command()
def upload() -> None:
    _upload()


def _upload() -> None:
    """Upload release to pypi."""

    current_version = get_package_version()
@@ -479,8 +500,75 @@ def _merge_into(repo: Repo, source: str, target: str) -> None:
    repo.remote().push()


@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
def wait_for_actions(gh_token: Optional[str]) -> None:
    _wait_for_actions(gh_token)


def _wait_for_actions(gh_token: Optional[str]) -> None:
    # Find out the version and tag name.
    current_version = get_package_version()
    tag_name = f"v{current_version}"

    # Authentication is optional on this endpoint,
    # but use a token if we have one to reduce the chance of being rate-limited.
    url = f"https://api.github.com/repos/matrix-org/synapse/actions/runs?branch={tag_name}"
    headers = {"Accept": "application/vnd.github+json"}
    if gh_token is not None:
        headers["authorization"] = f"token {gh_token}"
    req = urllib.request.Request(url, headers=headers)

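    # The release assets take a while to build; wait before polling at all,
    # then re-check every five minutes until nothing is still in progress.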
    time.sleep(10 * 60)
    while True:
        time.sleep(5 * 60)
        response = urllib.request.urlopen(req)
        resp = json.loads(response.read())

        if len(resp["workflow_runs"]) == 0:
            continue

        if all(
            workflow["status"] != "in_progress" for workflow in resp["workflow_runs"]
        ):
            success = all(
                workflow["conclusion"] == "success" for workflow in resp["workflow_runs"]
            )
            if success:
                _notify("Workflows successful. You can now continue the release.")
            else:
                _notify("Workflows failed.")
                click.confirm("Continue anyway?", abort=True)

            break


def _notify(message: str) -> None:
    # Send a bell character. Most terminals will play a sound or show a notification
    # for this.
    click.echo(f"\a{message}")

    # Try and run notify-send, but don't raise an Exception if this fails
    # (This is best-effort)
    # TODO Support other platforms?
    subprocess.run(
        [
            "notify-send",
            "--app-name",
            "Synapse Release Script",
            "--expire-time",
            "3600000",
            message,
        ]
    )


@cli.command()
def merge_back() -> None:
    _merge_back()


def _merge_back() -> None:
    """Merge the release branch back into the appropriate branches.
    All branches will be automatically pulled from the remote and the results
    will be pushed to the remote."""
@@ -519,6 +607,10 @@ def merge_back() -> None:

@cli.command()
def announce() -> None:
    _announce()


def _announce() -> None:
    """Generate markdown to announce the release."""

    current_version = get_package_version()
@@ -548,10 +640,56 @@ Announce the release in
- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
- #synapse-dev:matrix.org
- #synapse-package-maintainers:matrix.org"""
- #synapse-package-maintainers:matrix.org

Ask the designated people to do the blog and tweets."""
    )


@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def full(gh_token: str) -> None:
    click.echo("1. If this is a security release, read the security wiki page.")
    click.echo("2. Check for any release blockers before proceeding.")
    click.echo(" https://github.com/matrix-org/synapse/labels/X-Release-Blocker")

    click.confirm("Ready?", abort=True)

    click.echo("\n*** prepare ***")
    _prepare()

    click.echo("Deploy to matrix.org and ensure that it hasn't fallen over.")
    click.echo("Remember to silence the alerts to prevent alert spam.")
    click.confirm("Deployed?", abort=True)

    click.echo("\n*** tag ***")
    _tag(gh_token)

    click.echo("\n*** wait for actions ***")
    _wait_for_actions(gh_token)

    click.echo("\n*** publish ***")
    _publish(gh_token)

    click.echo("\n*** upload ***")
    _upload()

    click.echo("\n*** merge back ***")
    _merge_back()

    click.echo("\nUpdate the Debian repository")
    click.confirm("Started updating Debian repository?", abort=True)

    click.echo("\nWait for all release methods to be ready.")
    # Docker should be ready because it was done by the workflows earlier
    # PyPI should be ready because we just ran upload().
    # TODO Automatically poll until the Debs have made it to packages.matrix.org
    click.confirm("Debs ready?", abort=True)

    click.echo("\n*** announce ***")
    _announce()


def get_package_version() -> version.Version:
    version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
        "utf-8"

@@ -0,0 +1,2 @@
|
||||
def sum_as_string(a: int, b: int) -> str: ...
|
||||
def get_rust_file_digest() -> str: ...
|
||||

@@ -20,6 +20,8 @@ import json
import os
import sys

+from synapse.util.rust import check_rust_lib_up_to_date
+
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")

@@ -78,3 +80,6 @@ if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    from synapse.util.patch_inline_callbacks import do_patch

    do_patch()
+
+
+check_rust_lib_up_to_date()
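
A toy sketch (not Synapse's implementation) of the idea behind check_rust_lib_up_to_date: hash the Rust sources and compare against a digest recorded when the native extension was built, so a stale build fails fast; the helper names below are assumptions:

# Toy staleness check; digest_sources and built_digest are illustrative only.
import hashlib
from pathlib import Path

def digest_sources(root: Path) -> str:
    sha = hashlib.sha256()
    for path in sorted(root.rglob("*.rs")):
        sha.update(path.read_bytes())
    return sha.hexdigest()

def check_up_to_date(root: Path, built_digest: str) -> None:
    if digest_sources(root) != built_digest:
        raise RuntimeError("Rust extension is stale: rebuild the native module")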

@@ -32,12 +32,14 @@ from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import (
+    SynapseTags,
    active_span,
    force_tracing,
    start_active_span,
    trace,
)
from synapse.types import Requester, create_requester
+from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
    from synapse.server import HomeServer

@@ -118,6 +120,7 @@ class Auth:
                errcode=Codes.NOT_JOINED,
            )

+    @cancellable
    async def get_user_by_req(
        self,
        request: SynapseRequest,

@@ -159,6 +162,12 @@ class Auth:
                parent_span.set_tag(
                    "authenticated_entity", requester.authenticated_entity
                )
+                # We tag the Synapse instance name so that it's an easy jumping
+                # off point into the logs. Can also be used to filter for an
+                # instance that is under load.
+                parent_span.set_tag(
+                    SynapseTags.INSTANCE_NAME, self.hs.get_instance_name()
+                )
                parent_span.set_tag("user_id", requester.user.to_string())
                if requester.device_id is not None:
                    parent_span.set_tag("device_id", requester.device_id)

@@ -166,6 +175,7 @@ class Auth:
                parent_span.set_tag("appservice_id", requester.app_service.id)
        return requester

+    @cancellable
    async def _wrapped_get_user_by_req(
        self,
        request: SynapseRequest,

@@ -281,6 +291,7 @@ class Auth:
            403, "Application service has not registered this user (%s)" % user_id
        )

+    @cancellable
    async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
        """
        Given a request, reads the request parameters to determine:

@@ -523,6 +534,7 @@ class Auth:
        return bool(query_params) or bool(auth_headers)

    @staticmethod
+    @cancellable
    def get_access_token_from_request(request: Request) -> str:
        """Extracts the access_token from the request.

@@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = {


@FormatChecker.cls_checks("matrix_room_id")
-def matrix_room_id_validator(room_id_str: str) -> RoomID:
-    return RoomID.from_string(room_id_str)
+def matrix_room_id_validator(room_id: object) -> bool:
+    return isinstance(room_id, str) and RoomID.is_valid(room_id)


@FormatChecker.cls_checks("matrix_user_id")
-def matrix_user_id_validator(user_id_str: str) -> UserID:
-    return UserID.from_string(user_id_str)
+def matrix_user_id_validator(user_id: object) -> bool:
+    return isinstance(user_id, str) and UserID.is_valid(user_id)


class Filtering:
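
For context, jsonschema format checkers are expected to return a boolean: the old versions returned parsed RoomID/UserID objects (always truthy) and could raise instead of producing a normal validation error. A minimal sketch of the bool-returning contract, assuming the jsonschema package (the room-ID rule below is illustrative, not Synapse's exact logic):

# Demo of a bool-returning format checker with the jsonschema package.
from jsonschema import Draft7Validator, FormatChecker

checker = FormatChecker()

@checker.checks("matrix_room_id")
def room_id_check(value: object) -> bool:
    # True accepts the value; False yields a normal ValidationError.
    return isinstance(value, str) and value.startswith("!") and ":" in value

validator = Draft7Validator(
    {"type": "string", "format": "matrix_room_id"}, format_checker=checker
)
print(list(validator.iter_errors("!room:example.org")))   # [] -> valid
print(len(list(validator.iter_errors("not a room id"))))  # 1 -> invalid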

@@ -19,18 +19,23 @@ import attr

class EventFormatVersions:
    """This is an internal enum for tracking the version of the event format,
-    independently from the room version.
+    independently of the room version.
+
+    To reduce confusion, the event format versions are named after the room
+    versions that they were used or introduced in.
+    The concept of an 'event format version' is specific to Synapse (the
+    specification does not mention this term.)
    """

-    V1 = 1  # $id:server event id format
-    V2 = 2  # MSC1659-style $hash event id format: introduced for room v3
-    V3 = 3  # MSC1884-style $hash format: introduced for room v4
+    ROOM_V1_V2 = 1  # $id:server event id format: used for room v1 and v2
+    ROOM_V3 = 2  # MSC1659-style $hash event id format: used for room v3
+    ROOM_V4_PLUS = 3  # MSC1884-style $hash format: introduced for room v4


KNOWN_EVENT_FORMAT_VERSIONS = {
-    EventFormatVersions.V1,
-    EventFormatVersions.V2,
-    EventFormatVersions.V3,
+    EventFormatVersions.ROOM_V1_V2,
+    EventFormatVersions.ROOM_V3,
+    EventFormatVersions.ROOM_V4_PLUS,
}

@@ -92,7 +97,7 @@ class RoomVersions:
    V1 = RoomVersion(
        "1",
        RoomDisposition.STABLE,
-        EventFormatVersions.V1,
+        EventFormatVersions.ROOM_V1_V2,
        StateResolutionVersions.V1,
        enforce_key_validity=False,
        special_case_aliases_auth=True,

@@ -110,7 +115,7 @@ class RoomVersions:
    V2 = RoomVersion(
        "2",
        RoomDisposition.STABLE,
-        EventFormatVersions.V1,
+        EventFormatVersions.ROOM_V1_V2,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,

@@ -128,7 +133,7 @@ class RoomVersions:
    V3 = RoomVersion(
        "3",
        RoomDisposition.STABLE,
-        EventFormatVersions.V2,
+        EventFormatVersions.ROOM_V3,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,

@@ -146,7 +151,7 @@ class RoomVersions:
    V4 = RoomVersion(
        "4",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=False,
        special_case_aliases_auth=True,

@@ -164,7 +169,7 @@ class RoomVersions:
    V5 = RoomVersion(
        "5",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=True,

@@ -182,7 +187,7 @@ class RoomVersions:
    V6 = RoomVersion(
        "6",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -200,7 +205,7 @@ class RoomVersions:
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -218,7 +223,7 @@ class RoomVersions:
    V7 = RoomVersion(
        "7",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -236,7 +241,7 @@ class RoomVersions:
    V8 = RoomVersion(
        "8",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -254,7 +259,7 @@ class RoomVersions:
    V9 = RoomVersion(
        "9",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -272,7 +277,7 @@ class RoomVersions:
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -290,7 +295,7 @@ class RoomVersions:
    V10 = RoomVersion(
        "10",
        RoomDisposition.STABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -308,7 +313,7 @@ class RoomVersions:
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
+        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,

@@ -511,9 +511,10 @@ async def start(hs: "HomeServer") -> None:
        setup_sentry(hs)
        setup_sdnotify(hs)

-    # If background tasks are running on the main process, start collecting the
-    # phone home stats.
+    # If background tasks are running on the main process or this is the worker in
+    # charge of them, start collecting the phone home stats and shared usage metrics.
    if hs.config.worker.run_background_tasks:
+        await hs.get_common_usage_metrics_manager().setup()
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)

@@ -57,7 +57,6 @@ from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource

@@ -290,16 +289,6 @@ class SynapseHomeServer(HomeServer):
                manhole_settings=self.config.server.manhole_settings,
                manhole_globals={"hs": self},
            )
-        elif listener.type == "replication":
-            services = listen_tcp(
-                listener.bind_addresses,
-                listener.port,
-                ReplicationStreamProtocolFactory(self),
-            )
-            for s in services:
-                self.get_reactor().addSystemEventTrigger(
-                    "before", "shutdown", s.stopListening
-                )
        elif listener.type == "metrics":
            if not self.config.metrics.enable_metrics:
                logger.warning(

@@ -32,15 +32,15 @@ logger = logging.getLogger("synapse.app.homeserver")
_stats_process: List[Tuple[int, "resource.struct_rusage"]] = []

# Gauges to expose monthly active user control metrics
-current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
+current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU")
current_mau_by_service_gauge = Gauge(
    "synapse_admin_mau_current_mau_by_service",
    "Current MAU by service",
    ["app_service"],
)
-max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
-    "synapse_admin_mau:registered_reserved_users",
+    "synapse_admin_mau_registered_reserved_users",
    "Registered users with reserved threepids",
)
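
The renames above drop colons from the metric names: in Prometheus, colons are reserved by convention for recording rules, so metrics exported directly from an application should stick to underscores. A minimal sketch with the prometheus_client package:

# Client-exported metric names should use underscores, not colons.
from prometheus_client import Gauge

current_mau = Gauge("synapse_admin_mau_current", "Current MAU")
current_mau.set(42)  # exported as: synapse_admin_mau_current 42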

@@ -51,6 +51,16 @@ async def phone_stats_home(
    stats: JsonDict,
    stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process,
) -> None:
+    """Collect usage statistics and send them to the configured endpoint.
+
+    Args:
+        hs: the HomeServer object to use for gathering usage data.
+        stats: the dict in which to store the statistics sent to the configured
+            endpoint. Mostly used in tests to figure out the data that is supposed to
+            be sent.
+        stats_process: statistics about resource usage of the process.
+    """
+
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    # Ensure the homeserver has started.

@@ -83,6 +93,7 @@ async def phone_stats_home(
    #

    store = hs.get_datastores().main
+    common_metrics = await hs.get_common_usage_metrics_manager().get_metrics()

    stats["homeserver"] = hs.config.server.server_name
    stats["server_context"] = hs.config.server.server_context

@@ -104,7 +115,7 @@ async def phone_stats_home(
    room_count = await store.get_room_count()
    stats["total_room_count"] = room_count

-    stats["daily_active_users"] = await store.count_daily_users()
+    stats["daily_active_users"] = common_metrics.daily_active_users
    stats["monthly_active_users"] = await store.count_monthly_users()
    daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms()
    stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms

+12 -1
@@ -217,7 +217,18 @@ class KeyConfig(Config):

        signing_keys = self.read_file(signing_key_path, name)
        try:
-            return read_signing_keys(signing_keys.splitlines(True))
+            loaded_signing_keys = read_signing_keys(
+                [
+                    signing_key_line
+                    for signing_key_line in signing_keys.splitlines(keepends=False)
+                    if signing_key_line.strip()
+                ]
+            )
+
+            if not loaded_signing_keys:
+                raise ConfigError(f"No signing keys in file {signing_key_path}")
+
+            return loaded_signing_keys
        except Exception as e:
            raise ConfigError("Error reading %s: %s" % (name, str(e)))
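
The new version filters out blank lines before handing the file to read_signing_keys, so a trailing newline no longer produces a parse error, and an effectively empty key file is reported explicitly. A hedged round-trip sketch of that behavior, assuming the signedjson package Synapse uses for key handling:

# Generate a key, serialise it with a trailing blank line, and re-read it
# while skipping blank lines (the "\n\n" would otherwise break parsing).
from signedjson.key import (
    encode_signing_key_base64,
    generate_signing_key,
    read_signing_keys,
)

key = generate_signing_key("a_version")
content = f"ed25519 a_version {encode_signing_key_base64(key)}\n\n"
keys = read_signing_keys(line for line in content.splitlines() if line.strip())
print(keys[0].alg, keys[0].version)  # ed25519 a_version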

@@ -36,6 +36,12 @@ from ._util import validate_config

logger = logging.Logger(__name__)

+DIRECT_TCP_ERROR = """
+Using direct TCP replication for workers is no longer supported.
+
+Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis
+"""
+
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.

@@ -165,7 +171,6 @@ KNOWN_LISTENER_TYPES = {
    "http",
    "metrics",
    "manhole",
-    "replication",
}

KNOWN_RESOURCES = {

@@ -515,7 +520,9 @@ class ServerConfig(Config):
        ):
            raise ConfigError("allowed_avatar_mimetypes must be a list")

-        self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
+        self.listeners = [
+            parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
+        ]

        # no_tls is not really supported any more, but let's grandfather it in
        # here.

@@ -880,9 +887,12 @@ def read_gc_thresholds(
    )


-def parse_listener_def(listener: Any) -> ListenerConfig:
+def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
    """parse a listener config from the config file"""
    listener_type = listener["type"]
+    # Raise a helpful error if direct TCP replication is still configured.
+    if listener_type == "replication":
+        raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))

    port = listener.get("port")
    if not isinstance(port, int):

@@ -27,7 +27,7 @@ from ._base import (
    RoutableShardedWorkerHandlingConfig,
    ShardedWorkerHandlingConfig,
)
-from .server import ListenerConfig, parse_listener_def
+from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def

_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main

@@ -128,7 +128,8 @@ class WorkerConfig(Config):
            self.worker_app = None

        self.worker_listeners = [
-            parse_listener_def(x) for x in config.get("worker_listeners", [])
+            parse_listener_def(i, x)
+            for i, x in enumerate(config.get("worker_listeners", []))
        ]
        self.worker_daemonize = bool(config.get("worker_daemonize"))
        self.worker_pid_file = config.get("worker_pid_file")

@@ -142,7 +143,8 @@ class WorkerConfig(Config):
        self.worker_replication_host = config.get("worker_replication_host", None)

        # The port on the main synapse for TCP replication
-        self.worker_replication_port = config.get("worker_replication_port", None)
+        if "worker_replication_port" in config:
+            raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))

        # The port on the main synapse for HTTP replication endpoint
        self.worker_replication_http_port = config.get("worker_replication_http_port")

@@ -109,7 +109,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
        if not is_invite_via_3pid:
            raise AuthError(403, "Event not signed by sender's server")

-    if event.format_version in (EventFormatVersions.V1,):
+    if event.format_version in (EventFormatVersions.ROOM_V1_V2,):
        # Only older room versions have event IDs to check.
        event_id_domain = get_domain_from_id(event.event_id)

@@ -716,7 +716,7 @@ def check_redaction(
    if user_level >= redact_level:
        return False

-    if room_version_obj.event_format == EventFormatVersions.V1:
+    if room_version_obj.event_format == EventFormatVersions.ROOM_V1_V2:
        redacter_domain = get_domain_from_id(event.event_id)
        if not isinstance(event.redacts, str):
            return False

@@ -442,7 +442,7 @@ class EventBase(metaclass=abc.ABCMeta):


class FrozenEvent(EventBase):
-    format_version = EventFormatVersions.V1  # All events of this type are V1
+    format_version = EventFormatVersions.ROOM_V1_V2  # All events of this type are V1

    def __init__(
        self,

@@ -490,7 +490,7 @@ class FrozenEvent(EventBase):


class FrozenEventV2(EventBase):
-    format_version = EventFormatVersions.V2  # All events of this type are V2
+    format_version = EventFormatVersions.ROOM_V3  # All events of this type are V2

    def __init__(
        self,

@@ -567,7 +567,7 @@ class FrozenEventV2(EventBase):
class FrozenEventV3(FrozenEventV2):
    """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""

-    format_version = EventFormatVersions.V3  # All events of this type are V3
+    format_version = EventFormatVersions.ROOM_V4_PLUS  # All events of this type are V3

    @property
    def event_id(self) -> str:

@@ -597,11 +597,11 @@ def _event_type_from_format_version(
        `FrozenEvent`
    """

-    if format_version == EventFormatVersions.V1:
+    if format_version == EventFormatVersions.ROOM_V1_V2:
        return FrozenEvent
-    elif format_version == EventFormatVersions.V2:
+    elif format_version == EventFormatVersions.ROOM_V3:
        return FrozenEventV2
-    elif format_version == EventFormatVersions.V3:
+    elif format_version == EventFormatVersions.ROOM_V4_PLUS:
        return FrozenEventV3
    else:
        raise Exception("No event format %r" % (format_version,))

@@ -137,7 +137,7 @@ class EventBuilder:
        # The types of auth/prev events changes between event versions.
        prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
        auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
-        if format_version == EventFormatVersions.V1:
+        if format_version == EventFormatVersions.ROOM_V1_V2:
            auth_events = await self._store.add_event_hashes(auth_event_ids)
            prev_events = await self._store.add_event_hashes(prev_event_ids)
        else:

@@ -253,7 +253,7 @@ def create_local_event_from_event_dict(

    time_now = int(clock.time_msec())

-    if format_version == EventFormatVersions.V1:
+    if format_version == EventFormatVersions.ROOM_V1_V2:
        event_dict["event_id"] = _create_event_id(clock, hostname)

    event_dict["origin"] = hostname

@@ -45,7 +45,7 @@ class EventValidator:
        """
        self.validate_builder(event)

-        if event.format_version == EventFormatVersions.V1:
+        if event.format_version == EventFormatVersions.ROOM_V1_V2:
            EventID.from_string(event.event_id)

        required = [

@@ -194,7 +194,7 @@ async def _check_sigs_on_pdu(
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks. Only do this if the room version has a concept of event ID domain
    # (ie, the room version uses old-style non-hash event IDs).
-    if room_version.event_format == EventFormatVersions.V1:
+    if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
        event_domain = get_domain_from_id(pdu.event_id)
        if event_domain != sender_domain:
            try:

@@ -1190,7 +1190,7 @@ class FederationClient(FederationBase):
            # Otherwise, consider it a legitimate error and raise.
            err = e.to_synapse_error()
            if self._is_unknown_endpoint(e, err):
-                if room_version.event_format != EventFormatVersions.V1:
+                if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",

@@ -62,12 +62,12 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
-    "synapse_federation_client_sent_pdu_destinations:count",
+    "synapse_federation_client_sent_pdu_destinations_count",
    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
-    "synapse_federation_client_sent_pdu_destinations:total",
+    "synapse_federation_client_sent_pdu_destinations",
    "Total number of PDUs queued for sending across all destinations",
)

@@ -70,6 +70,7 @@ class AdminHandler:
            "appservice_id",
            "consent_server_notice_sent",
            "consent_version",
+            "consent_ts",
            "user_type",
            "is_guest",
        }

@@ -52,6 +52,7 @@ from synapse.types import (
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.cancellation import cancellable
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination

@@ -124,6 +125,7 @@ class DeviceWorkerHandler:

        return device

+    @cancellable
    async def get_device_changes_in_shared_rooms(
        self, user_id: str, room_ids: Collection[str], from_token: StreamToken
    ) -> Collection[str]:

@@ -163,6 +165,7 @@ class DeviceWorkerHandler:

    @trace
    @measure_func("device.get_user_ids_changed")
+    @cancellable
    async def get_user_ids_changed(
        self, user_id: str, from_token: StreamToken
    ) -> JsonDict:

@@ -37,7 +37,8 @@ from synapse.types import (
    get_verify_key_from_cross_signing_key,
)
from synapse.util import json_decoder, unwrapFirstError
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import Linearizer, delay_cancellation
+from synapse.util.cancellation import cancellable
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:

@@ -91,6 +92,7 @@ class E2eKeysHandler:
        )

    @trace
+    @cancellable
    async def query_devices(
        self,
        query_body: JsonDict,

@@ -208,22 +210,26 @@ class E2eKeysHandler:
                r[user_id] = remote_queries[user_id]

        # Now fetch any devices that we don't have in our cache
+        # TODO It might make sense to propagate cancellations into the
+        # deferreds which are querying remote homeservers.
        await make_deferred_yieldable(
-            defer.gatherResults(
-                [
-                    run_in_background(
-                        self._query_devices_for_destination,
-                        results,
-                        cross_signing_keys,
-                        failures,
-                        destination,
-                        queries,
-                        timeout,
-                    )
-                    for destination, queries in remote_queries_not_in_cache.items()
-                ],
-                consumeErrors=True,
-            ).addErrback(unwrapFirstError)
+            delay_cancellation(
+                defer.gatherResults(
+                    [
+                        run_in_background(
+                            self._query_devices_for_destination,
+                            results,
+                            cross_signing_keys,
+                            failures,
+                            destination,
+                            queries,
+                            timeout,
+                        )
+                        for destination, queries in remote_queries_not_in_cache.items()
+                    ],
+                    consumeErrors=True,
+                ).addErrback(unwrapFirstError)
+            )
        )

        ret = {"device_keys": results, "failures": failures}
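
For context, gatherResults with consumeErrors=True wraps the first failure in a Twisted FirstError, and unwrapFirstError restores the original exception; delay_cancellation (a Synapse helper) additionally keeps the underlying work running if the caller is cancelled. A toy, self-contained sketch of the FirstError-unwrapping part (not Synapse code):

# Toy demonstration of the gatherResults/consumeErrors/unwrap pattern.
from twisted.internet import defer
from twisted.python.failure import Failure

def unwrap_first_error(failure: Failure) -> Failure:
    # gatherResults wraps the first failure in FirstError; recover the original.
    failure.trap(defer.FirstError)
    return failure.value.subFailure

d_ok = defer.succeed("ok")
d_err = defer.fail(RuntimeError("boom"))
gathered = defer.gatherResults([d_ok, d_err], consumeErrors=True)
gathered.addErrback(unwrap_first_error)
gathered.addErrback(lambda f: print("original error:", f.value))  # RuntimeError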

@@ -347,6 +353,7 @@ class E2eKeysHandler:

        return

+    @cancellable
    async def get_cross_signing_keys_from_cache(
        self, query: Iterable[str], from_user_id: Optional[str]
    ) -> Dict[str, Dict[str, dict]]:

@@ -393,6 +400,7 @@ class E2eKeysHandler:
        }

    @trace
+    @cancellable
    async def query_local_devices(
        self, query: Mapping[str, Optional[List[str]]]
    ) -> Dict[str, Dict[str, dict]]:

@@ -26,6 +26,7 @@ from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType

@@ -423,6 +424,7 @@ class PaginationHandler:
        pagin_config: PaginationConfig,
        as_client_event: bool = True,
        event_filter: Optional[Filter] = None,
+        use_admin_priviledge: bool = False,
    ) -> JsonDict:
        """Get messages in a room.

@@ -432,10 +434,16 @@ class PaginationHandler:
            pagin_config: The pagination config rules to apply, if any.
            as_client_event: True to get events in client-server format.
            event_filter: Filter to apply to results or None
+            use_admin_priviledge: if `True`, return all events, regardless
+                of whether `user` has access to them. To be used **ONLY**
+                from the admin API.

        Returns:
            Pagination API results
        """
+        if use_admin_priviledge:
+            await assert_user_is_admin(self.auth, requester)
+
        user_id = requester.user.to_string()

        if pagin_config.from_token:

@@ -458,12 +466,14 @@ class PaginationHandler:
            room_token = from_token.room_key

        async with self.pagination_lock.read(room_id):
-            (
-                membership,
-                member_event_id,
-            ) = await self.auth.check_user_in_room_or_world_readable(
-                room_id, requester, allow_departed_users=True
-            )
+            (membership, member_event_id) = (None, None)
+            if not use_admin_priviledge:
+                (
+                    membership,
+                    member_event_id,
+                ) = await self.auth.check_user_in_room_or_world_readable(
+                    room_id, requester, allow_departed_users=True
+                )

            if pagin_config.direction == "b":
                # if we're going backwards, we might need to backfill. This

@@ -475,7 +485,7 @@ class PaginationHandler:
                    room_id, room_token.stream
                )

-            if membership == Membership.LEAVE:
+            if not use_admin_priviledge and membership == Membership.LEAVE:
                # If they have left the room then clamp the token to be before
                # they left the room, to save the effort of loading from the
                # database.

@@ -528,12 +538,13 @@ class PaginationHandler:
        if event_filter:
            events = await event_filter.filter(events)

-        events = await filter_events_for_client(
-            self._storage_controllers,
-            user_id,
-            events,
-            is_peeking=(member_event_id is None),
-        )
+        if not use_admin_priviledge:
+            events = await filter_events_for_client(
+                self._storage_controllers,
+                user_id,
+                events,
+                is_peeking=(member_event_id is None),
+            )

        # if after the filter applied there are no more events
        # return immediately - but there might be more in next_token batch
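
The pattern above gates every membership and visibility check on a single flag that only the admin API can set. A toy sketch of the same shape (not Synapse's code; the placeholder visibility rule is an assumption):

# Skip per-user visibility filtering only on the trusted admin path.
from typing import List

def visible_events(events: List[str], is_admin: bool) -> List[str]:
    if is_admin:
        # Admin API: return everything, regardless of the user's access.
        return events
    # Normal path: drop events the user may not see (placeholder rule).
    return [e for e in events if not e.startswith("secret:")]

print(visible_events(["a", "secret:b"], is_admin=False))  # ['a']
print(visible_events(["a", "secret:b"], is_admin=True))   # ['a', 'secret:b']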

@@ -453,7 +453,6 @@ class RoomSummaryHandler:
            "type": e.type,
            "state_key": e.state_key,
            "content": e.content,
-            "room_id": e.room_id,
            "sender": e.sender,
            "origin_server_ts": e.origin_server_ts,
        }
Some files were not shown because too many files have changed in this diff.