Compare commits: v1.62.0...rei/synwor (54 commits)
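The same comparison can be reproduced locally with plain git. The sketch below is illustrative and assumes a clone of the Synapse repository in which the tag `v1.62.0` and the `rei/synwor` branch have already been fetched from `origin`.

```sh
# List the commits that are on rei/synwor but not in v1.62.0
# (this should match the 54 commits tabulated below).
git log --oneline v1.62.0..origin/rei/synwor
```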
| Author | SHA1 | Date |
|---|---|---|
| | 00b5bba66e | |
| | 01747a28ca | |
| | c347978164 | |
| | 415ba59e8b | |
| | 2b0ebf1b4d | |
| | 0b9f7b123e | |
| | 4332493df3 | |
| | b8c6fd979a | |
| | c14dcea4b6 | |
| | 22a7b762bc | |
| | 0c95313a44 | |
| | bb20113c8f | |
| | 1391a76cd2 | |
| | 2b5ab8e367 | |
| | 4aaeb87dad | |
| | fb7d24ab6d | |
| | 57f6f59e3e | |
| | dcc7873700 | |
| | a0f51b059c | |
| | 68db233f0c | |
| | 6ba732fefe | |
| | 68695d8007 | |
| | 578a5e24a9 | |
| | 347165bc06 | |
| | 2c2a42cc10 | |
| | 65e675504f | |
| | e514495465 | |
| | d102ad67fd | |
| | 5b5c943e7d | |
| | dcc4e0621c | |
| | 6180e1bc4b | |
| | 9820665597 | |
| | fa10468eb4 | |
| | c04e25789e | |
| | fe910fb10e | |
| | 5296c09473 | |
| | d70ff5cc35 | |
| | 6da861ae69 | |
| | 8c2825276f | |
| | c0efc689cb | |
| | 50f0e4028b | |
| | b0366853ca | |
| | 046a6513bc | |
| | 8330fc9953 | |
| | 0ceb3af10b | |
| | 6ad012ef89 | |
| | 9667bad55d | |
| | 09f6e43025 | |
| | 80c7a06777 | |
| | 4d3b8fb23f | |
| | 13e359aec8 | |
| | e714b8a057 | |
| | 92a0c18ef0 | |
| | cdc0259449 | |
36  .ci/scripts/setup_complement_prerequisites.sh  (Executable file)

@@ -0,0 +1,36 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu

alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock

block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock

block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock
395  .github/workflows/tests.yml  (vendored)

@@ -10,403 +10,37 @@ concurrency:
  cancel-in-progress: true

jobs:
  check-sampleconfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install .
      - run: scripts-dev/generate_sample_config.sh --check
      - run: scripts-dev/config-lint.sh

  check-schema-delta:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors

  lint:
    uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
    with:
      typechecking-extras: "all"

  lint-crlf:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Check line endings
        run: scripts-dev/check_line_terminators.sh

  lint-newsfile:
    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - uses: actions/setup-python@v2
      - run: "pip install 'towncrier>=18.6.0rc1'"
      - run: scripts-dev/check-newsfragment.sh
        env:
          PULL_REQUEST_NUMBER: ${{ github.event.number }}

  # Dummy step to gate other tests on without repeating the whole list
  linting-done:
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
    runs-on: ubuntu-latest
    steps:
      - run: "true"

  trial:
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.7", "3.8", "3.9", "3.10"]
        database: ["sqlite"]
        extras: ["all"]
        include:
          # Newest Python without optional deps
          - python-version: "3.10"
            extras: ""

          # Oldest Python with PostgreSQL
          - python-version: "3.7"
            database: "postgres"
            postgres-version: "10"
            extras: "all"

          # Newest Python with newest PostgreSQL
          - python-version: "3.10"
            database: "postgres"
            postgres-version: "14"
            extras: "all"

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: ${{ matrix.extras }}
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: poetry run trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-olddeps:
    # Note: sqlite only; no postgres
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Test with old deps
        uses: docker://ubuntu:focal # For old python and sqlite
        # Note: focal seems to be using 3.8, but the oldest is 3.7?
        # See https://github.com/matrix-org/synapse/issues/12343
        with:
          workdir: /github/workspace
          entrypoint: .ci/scripts/test_old_deps.sh
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-pypy:
    # Very slow; only run if the branch name includes 'pypy'
    # Note: sqlite only; no postgres. Completely untested since poetry move.
    if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["pypy-3.7"]
        extras: ["all"]

    steps:
      - uses: actions/checkout@v2
      # Install libs necessary for PyPy to build binary wheels for dependencies
      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: ${{ matrix.extras }}
      - run: poetry run trial --jobs=2 tests
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    if: ${{ !failure() && !cancelled() }}
    needs: linting-done
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
      volumes:
        - ${{ github.workspace }}:/src
      env:
        SYTEST_BRANCH: ${{ github.head_ref }}
        POSTGRES: ${{ matrix.postgres && 1}}
        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
        WORKERS: ${{ matrix.workers && 1 }}
        REDIS: ${{ matrix.redis && 1 }}
        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
        TOP: ${{ github.workspace }}

    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: focal

          - sytest-tag: focal
            postgres: postgres

          - sytest-tag: testing
            postgres: postgres

          - sytest-tag: focal
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: postgres
            workers: workers
            redis: redis

    steps:
      - uses: actions/checkout@v2
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  export-data:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: [linting-done, portdb]
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}

    services:
      postgres:
        image: postgres
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: "postgres"
      - run: .ci/scripts/test_export_data_command.sh

  portdb:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}
    strategy:
      matrix:
        include:
          - python-version: "3.7"
            postgres-version: "10"

          - python-version: "3.10"
            postgres-version: "14"

    services:
      postgres:
        image: postgres:${{ matrix.postgres-version }}
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: "postgres"
      - run: .ci/scripts/test_synapse_port_db.sh

  complement:
    if: "${{ !failure() && !cancelled() }}"
    needs: linting-done
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
          - arrangement: workers
            database: Postgres

    steps:
      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
      - name: "Set Go Version"
        run: |
          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
          # Add the Go path to the PATH: We need this so we can call gotestfmt
          echo "~/go/bin" >> $GITHUB_PATH

      - name: "Install Complement Dependencies"
        run: |
          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: "Install custom gotestfmt template"
        run: |
          mkdir .gotestfmt/github -p
          cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl

      # Attempt to check out the same branch of Complement as the PR. If it
      # doesn't exist, fallback to HEAD.
      - name: Checkout complement
        run: synapse/.ci/scripts/checkout_complement.sh
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      - run: |
          set -o pipefail
          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
          synapse/scripts-dev/complement.sh --build-only
          while :; do
            POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -f -run TestSendJoinPartialStateResponse -json 2>&1 | gotestfmt | tee /tmp/xxx
            if grep ❌ /tmp/xxx; then
              break
            fi
          done

  # We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
  # Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
  # GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
  complement-developonly:
    if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
    needs: linting-done
    runs-on: ubuntu-latest

    name: "Complement Workers (develop only)"

    steps:
      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
      - name: "Set Go Version"
        run: |
          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
          # Add the Go path to the PATH: We need this so we can call gotestfmt
          echo "~/go/bin" >> $GITHUB_PATH

      - name: "Install Complement Dependencies"
        run: |
          sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: "Install custom gotestfmt template"
        run: |
          mkdir .gotestfmt/github -p
          cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl

      # Attempt to check out the same branch of Complement as the PR. If it
      # doesn't exist, fallback to HEAD.
      - name: Checkout complement
        run: synapse/.ci/scripts/checkout_complement.sh

      - run: |
          set -o pipefail
          WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
@@ -414,15 +48,6 @@ jobs:
  tests-done:
    if: ${{ always() }}
    needs:
      - check-sampleconfig
      - lint
      - lint-crlf
      - lint-newsfile
      - trial
      - trial-olddeps
      - sytest
      - export-data
      - portdb
      - complement
    runs-on: ubuntu-latest
    steps:
46  .github/workflows/twisted_trunk.yml  (vendored)

@@ -96,6 +96,51 @@ jobs:
            /logs/results.tap
            /logs/**/*.log*

  complement:
    if: "${{ !failure() && !cancelled() }}"
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
            database: Postgres

          - arrangement: workers
            database: Postgres

    steps:
      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      # This step is specific to the 'Twisted trunk' test run:
      - name: Patch dependencies
        run: |
          set -x
          DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
          pipx install poetry==1.1.12

          poetry remove -n twisted
          poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry lock --no-update
          # NOT IN 1.1.12 poetry lock --check
        working-directory: synapse

      - run: |
          set -o pipefail
          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests

  # open an issue if the build fails, so we know about it.
  open-issue:
    if: failure()
@@ -103,6 +148,7 @@ jobs:
      - mypy
      - trial
      - sytest
      - complement

    runs-on: ubuntu-latest
1  changelog.d/13028.misc  (Normal file)
@@ -0,0 +1 @@
Add type annotations to `tests.utils`.

1  changelog.d/13029.doc  (Normal file)
@@ -0,0 +1 @@
Add an explanation of the `--report-stats` argument to the docs.

1  changelog.d/13031.feature  (Normal file)
@@ -0,0 +1 @@
Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of /publicRooms by room type.

3  changelog.d/13077.doc  (Normal file)
@@ -0,0 +1,3 @@
Clean up references to sample configuration and redirect users to the configuration manual instead.

1  changelog.d/13079.misc  (Normal file)
@@ -0,0 +1 @@
Enable Complement testing in the 'Twisted Trunk' CI runs.

1  changelog.d/13086.doc  (Normal file)
@@ -0,0 +1 @@
Add documentation for anonymised homeserver statistics collection.

1  changelog.d/13100.misc  (Normal file)
@@ -0,0 +1 @@
Faster room joins: Handle race between persisting an event and un-partial stating a room.

1  changelog.d/13103.misc  (Normal file)
@@ -0,0 +1 @@
Add missing type hints to `synapse.logging`.

1  changelog.d/13113.misc  (Normal file)
@@ -0,0 +1 @@
Raise a `DependencyError` on missing dependencies instead of a `ConfigError`.

1  changelog.d/13116.doc  (Normal file)
@@ -0,0 +1 @@
Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation.

1  changelog.d/13119.misc  (Normal file)
@@ -0,0 +1 @@
Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room.

1  changelog.d/13125.feature  (Normal file)
@@ -0,0 +1 @@
Add a rate limit for local users sending invites.

1  changelog.d/13127.misc  (Normal file)
@@ -0,0 +1 @@
Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments.

1  changelog.d/13129.misc  (Normal file)
@@ -0,0 +1 @@
Only one-line SQL statements for logging and tracing.

1  changelog.d/13131.bugfix  (Normal file)
@@ -0,0 +1 @@
Fix application service not being able to join remote federated room without a profile set.

1  changelog.d/13132.doc  (Normal file)
@@ -0,0 +1 @@
Document how the Synapse team does reviews.

1  changelog.d/13134.misc  (Normal file)
@@ -0,0 +1 @@
Apply ratelimiting earlier in processing of /send request.

1  changelog.d/13135.misc  (Normal file)
@@ -0,0 +1 @@
Enforce type annotations for `tests.test_server`.

1  changelog.d/13136.misc  (Normal file)
@@ -0,0 +1 @@
Add type annotations to `tests.server`.

1  changelog.d/13139.doc  (Normal file)
@@ -0,0 +1 @@
Add a link to the configuration manual from the homeserver sample config documentation.

1  changelog.d/13143.misc  (Normal file)
@@ -0,0 +1 @@
Add support to `complement.sh` for skipping the docker build.

1  changelog.d/13144.misc  (Normal file)
@@ -0,0 +1 @@
Faster joins: skip waiting for full state when processing incoming events over federation.

1  changelog.d/13145.misc  (Normal file)
@@ -0,0 +1 @@
Improve exception handling when processing events received over federation.

1  changelog.d/13148.feature  (Normal file)
@@ -0,0 +1 @@
Improve validation logic in Synapse's REST endpoints.

1  changelog.d/13151.misc  (Normal file)
@@ -0,0 +1 @@
Faster room joins: fix race in recalculation of current room state.

1  changelog.d/13152.misc  (Normal file)
@@ -0,0 +1 @@
Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment when using `complement.sh`.

1  changelog.d/13153.misc  (Normal file)
@@ -0,0 +1 @@
Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room.

1  changelog.d/13157.misc  (Normal file)
@@ -0,0 +1 @@
Enable Complement testing in the 'Twisted Trunk' CI runs.

1  changelog.d/13158.misc  (Normal file)
@@ -0,0 +1 @@
Add support to `complement.sh` for skipping the docker build.

1  changelog.d/13159.misc  (Normal file)
@@ -0,0 +1 @@
Improve and fix type hints.

1  changelog.d/13166.doc  (Normal file)
@@ -0,0 +1 @@
Add missing links to config options.

1  changelog.d/13167.misc  (Normal file)
@@ -0,0 +1 @@
Update config used by Complement to allow device name lookup over federation.

1  changelog.d/13174.bugfix  (Normal file)
@@ -0,0 +1 @@
Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages.

1  changelog.d/13194.bugfix  (Normal file)
@@ -0,0 +1 @@
Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0.

1  changelog.d/13195.misc  (Normal file)
@@ -0,0 +1 @@
Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script.

1  changelog.d/13200.removal  (Normal file)
@@ -0,0 +1 @@
Remove obsolete and for 8 years unused `RoomEventsStoreTestCase`. Contributed by @arkamar.

1  changelog.d/13207.docker  (Normal file)
@@ -0,0 +1 @@
Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1.

1  changelog.d/13209.misc  (Normal file)
@@ -0,0 +1 @@
Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar).
@@ -62,7 +62,13 @@ WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt

# If specified, we won't verify the hashes of dependencies.
# This is only needed if the hashes of dependencies cannot be checked for some
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}

###
### Stage 1: builder
@@ -85,6 +91,7 @@ RUN \
    openssl \
    rustc \
    zlib1g-dev \
    git \
    && rm -rf /var/lib/apt/lists/*

# To speed up rebuilds, install all of the dependencies before we copy over
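As a rough illustration of how the new build argument is consumed, a local image build that skips dependency-hash verification might look like the sketch below; the image tag is arbitrary and the `docker/Dockerfile` path assumes Synapse's usual repository layout.

```sh
# Build the Synapse image without verifying dependency hashes. This is only
# needed when a dependency has no usable hash in poetry.lock, e.g. when a git
# checkout (such as Twisted trunk) has been substituted in.
docker build -f docker/Dockerfile \
  --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 \
  -t matrixdotorg/synapse:testing .
```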
@@ -67,6 +67,13 @@ The following environment variables are supported in `generate` mode:
* `UID`, `GID`: the user id and group id to use for creating the data
  directories. If unset, and no user is set via `docker run --user`, defaults
  to `991`, `991`.
* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
  Defaults to `INFO`.
* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
  will log sensitive information such as access tokens.
  This should not be needed unless you are a developer attempting to debug something
  particularly tricky.


## Postgres
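For example, a `generate`-mode run that exercises the two new logging variables might look like the following sketch; the volume and server names are placeholders.

```sh
# Generate a config with DEBUG logging. SYNAPSE_LOG_SENSITIVE additionally
# allows sensitive values such as access tokens to be logged, so it should
# only be used while debugging.
docker run -it --rm \
  -v synapse-data:/data \
  -e SYNAPSE_SERVER_NAME=my.matrix.host \
  -e SYNAPSE_REPORT_STATS=yes \
  -e SYNAPSE_LOG_LEVEL=DEBUG \
  -e SYNAPSE_LOG_SENSITIVE=1 \
  matrixdotorg/synapse:latest generate
```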
@@ -59,6 +59,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
    synchrotron, \
    appservice, \
    pusher"

  # Improve startup times by using a launcher based on fork()
  export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
  # Empty string here means 'main process only'
  export SYNAPSE_WORKER_TYPES=""
@@ -81,6 +81,8 @@ rc_invites:

federation_rr_transactions_per_room_per_second: 9999

allow_device_name_lookup_over_federation: true

## Experimental Features ##

experimental_features:
@@ -1,3 +1,24 @@
{% if use_forking_launcher %}
[program:synapse_fork]
command=/usr/local/bin/python -m synapse.app.complement_fork_starter
  {{ main_config_path }}
  synapse.app.homeserver
  --config-path="{{ main_config_path }}"
  --config-path=/conf/workers/shared.yaml
  {%- for worker in workers %}
  -- {{ worker.app }}
  --config-path="{{ main_config_path }}"
  --config-path=/conf/workers/shared.yaml
  --config-path=/conf/workers/{{ worker.name }}.yaml
  {%- endfor %}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0

{% else %}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
  --config-path="{{ main_config_path }}"

@@ -13,7 +34,7 @@ autorestart=unexpected
exitcodes=0


{% for worker in workers %}
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
  --config-path="{{ main_config_path }}"

@@ -27,4 +48,5 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

{% endfor %}
{% endfor %}
{% endif %}
@@ -2,7 +2,11 @@ version: 1

formatters:
  precise:
{% if include_worker_name_in_log_line %}
    format: '{{ worker_name }} | %(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}

handlers:
{% if LOG_FILE_PATH %}

@@ -45,11 +49,17 @@ handlers:
    class: logging.StreamHandler
    formatter: precise

{% if not SYNAPSE_LOG_SENSITIVE %}
{#
  If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
  so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers:
  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO
{% endif %}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
@@ -26,6 +26,13 @@
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
#   Nginx will be configured to serve TLS on port 8448.
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
#   only intended for usage in Complement at the moment.
#   No stability guarantees are provided.
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
#   log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
#   regardless of the SYNAPSE_LOG_LEVEL setting.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should

@@ -35,7 +42,7 @@ import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set

import yaml
from jinja2 import Environment, FileSystemLoader

@@ -525,6 +532,7 @@ def generate_worker_files(
        "/etc/supervisor/conf.d/synapse.conf",
        workers=worker_descriptors,
        main_config_path=config_path,
        use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
    )

    # healthcheck config

@@ -548,18 +556,25 @@ def generate_worker_log_config(
    Returns: the path to the generated file
    """
    # Check whether we should write worker logs to disk, in addition to the console
    extra_log_template_args = {}
    extra_log_template_args: Dict[str, Optional[str]] = {}
    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
            dir=data_dir, name=worker_name
        )
        extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"

    extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
    extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
        "SYNAPSE_LOG_SENSITIVE"
    )

    # Render and write the file
    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
    log_config_filepath = f"/conf/workers/{worker_name}.log.config"
    convert(
        "/conf/log.config",
        log_config_filepath,
        worker_name=worker_name,
        **extra_log_template_args,
        include_worker_name_in_log_line=environ.get(
            "SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
        ),
    )
    return log_config_filepath
@@ -110,7 +110,11 @@ def generate_config_from_template(

    log_config_file = environ["SYNAPSE_LOG_CONFIG"]
    log("Generating log config file " + log_config_file)
    convert("/conf/log.config", log_config_file, environ)
    convert(
        "/conf/log.config",
        log_config_file,
        {**environ, "include_worker_name_in_log_line": False},
    )

    # Hopefully we already have a signing key, but generate one if not.
    args = [
@@ -69,6 +69,7 @@
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Reporting Anonymised Statistics](usage/administration/monitoring/reporting_anonymised_statistics.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)

@@ -80,6 +81,7 @@
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Reviewing Code](development/reviews.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
@@ -124,9 +124,8 @@ Body parameters:
- `address` - string. Value of third-party ID.
  belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). Details in
  [Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
  section `sso` and `oidc_providers`.
  provider for SSO (Single sign-on). Details in the configuration manual under the
  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
  in the homeserver configuration. Note that no error is raised if the provided
  value is not in the homeserver configuration.
@@ -70,82 +70,61 @@ on save as they take a while and can be very resource intensive.
- Avoid wildcard imports (`from synapse.types import *`) and
  relative imports (`from .types import UserID`).

## Configuration file format
## Configuration code and documentation format

The [sample configuration file](./sample_config.yaml) acts as a
When adding a configuration option to the code, if several settings are grouped into a single dict, ensure that your code
correctly handles the top-level option being set to `None` (as it will be if no sub-options are enabled).

The [configuration manual](usage/configuration/config_documentation.md) acts as a
reference to Synapse's configuration options for server administrators.
Remember that many readers will be unfamiliar with YAML and server
administration in general, so that it is important that the file be as
easy to understand as possible, which includes following a consistent
format.
administration in general, so it is important that when you add
a configuration option the documentation be as easy to understand as possible, which
includes following a consistent format.

Some guidelines follow:

- Sections should be separated with a heading consisting of a single
  line prefixed and suffixed with `##`. There should be **two** blank
  lines before the section header, and **one** after.
- Each option should be listed in the file with the following format:
  - A comment describing the setting. Each line of this comment
    should be prefixed with a hash (`#`) and a space.
- Each option should be listed in the config manual with the following format:

  - The name of the option, prefixed by `###`.

    The comment should describe the default behaviour (ie, what
  - A comment which describes the default behaviour (i.e. what
    happens if the setting is omitted), as well as what the effect
    will be if the setting is changed.

    Often, the comment end with something like "uncomment the
    following to <do action>".

  - A line consisting of only `#`.
  - A commented-out example setting, prefixed with only `#`.
  - An example setting, using backticks to define the code block

    For boolean (on/off) options, convention is that this example
    should be the *opposite* to the default (so the comment will end
    with "Uncomment the following to enable [or disable]
    <feature>." For other options, the example should give some
    non-default value which is likely to be useful to the reader.
    should be the *opposite* to the default. For other options, the example should give
    some non-default value which is likely to be useful to the reader.

- There should be a blank line between each option.
- Where several settings are grouped into a single dict, *avoid* the
  convention where the whole block is commented out, resulting in
  comment lines starting `# #`, as this is hard to read and confusing
  to edit. Instead, leave the top-level config option uncommented, and
  follow the conventions above for sub-options. Ensure that your code
  correctly handles the top-level option being set to `None` (as it
  will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
  (`` ` ``) to refer to configuration options.
- There should be a horizontal rule between each option, which can be achieved by adding `---` before and
  after the option.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)

Example:

---
### `modules`

Use the `module` sub-option to add a module under `modules` to extend functionality.
The `module` setting then has a sub-option, `config`, which can be used to define some configuration
for the `module`.

Defaults to none.

Example configuration:
```yaml
## Frobnication ##

# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true

# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber

# Settings for the frobber
#
frobber:
  # frobbing speed. Defaults to 1.
  #
  #speed: 10

  # frobbing distance. Defaults to 1000.
  #
  #distance: 100
modules:
  - module: my_super_module.MySuperClass
    config:
      do_thing: true
  - module: my_other_super_module.SomeClass
    config: {}
```
---

Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
Making sure that the output from this script matches the desired format
is left as an exercise for the reader!
@@ -309,6 +309,10 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```
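These options can be combined with the command above; for instance, an illustrative workerised run of the same test (remember that `WORKERS=1` implies Postgres):

```sh
WORKERS=1 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```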
### Prettier formatting with `gotestfmt`
@@ -347,7 +351,7 @@ To prepare a Pull Request, please:
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
6. that's it for now, a non-draft pull request will automatically request review from the team;
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.
@@ -523,10 +527,13 @@ From this point, you should:
1. Look at the results of the CI pipeline.
    - If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
    - A pull request is a conversation, if you disagree with the suggestions, please respond and discuss it.
3. Create a new commit with the changes.
    - Please do NOT overwrite the history. New commits make the reviewer's life easier.
    - Push this commits to your Pull Request.
4. Back to 1.
5. Once the pull request is ready for review again please re-request review from whichever developer did your initial
   review (or leave a comment in the pull request that you believe all required changes have been done).

Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
41  docs/development/reviews.md  (Normal file)

@@ -0,0 +1,41 @@
Some notes on how we do reviews
===============================

The Synapse team works off a shared review queue -- any new pull requests for
Synapse (or related projects) has a review requested from the entire team. Team
members should process this queue using the following rules:

* Any high urgency pull requests (e.g. fixes for broken continuous integration
  or fixes for release blockers);
* Follow-up reviews for pull requests which have previously received reviews;
* Any remaining pull requests.

For the latter two categories above, older pull requests should be prioritised.

It is explicit that there is no priority given to pull requests from the team
(vs from the community). If a pull request requires a quick turn around, please
explicitly communicate this via [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)
or as a comment on the pull request.

Once an initial review has been completed and the author has made additional changes,
follow-up reviews should go back to the same reviewer. This helps build a shared
context and conversation between author and reviewer.

As a team we aim to keep the number of inflight pull requests to a minimum to ensure
that ongoing work is finished before starting new work.

Performing a review
-------------------

To communicate to the rest of the team the status of each pull request, team
members should do the following:

* Assign themselves to the pull request (they should be left assigned to the
  pull request until it is merged, closed, or are no longer the reviewer);
* Review the pull request by leaving comments, questions, and suggestions;
* Mark the pull request appropriately (as needing changes or accepted).

If you are unsure about a particular part of the pull request (or are not confident
in your understanding of part of the code) then ask questions or request review
from the team again. When requesting review from the team be sure to leave a comment
with the rationale on why you're putting it back in the queue.
@@ -49,9 +49,8 @@ as follows:
* For other installation mechanisms, see the documentation provided by the
  maintainer.

To enable the JSON web token integration, you should then add a `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
To enable the JSON web token integration, you should then add a `jwt_config` option
to your configuration file. See the [configuration manual](usage/configuration/config_documentation.md#jwt_config) for some
sample settings.

## How to test JWT as a developer
@@ -13,8 +13,10 @@ environments where untrusted users have shell access.

## Configuring the manhole

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`. The configuration is slightly different if you're using docker.
To enable it, first add the `manhole` listener configuration in your
`homeserver.yaml`. You can find information on how to do that
in the [configuration manual](usage/configuration/config_documentation.md#manhole_settings).
The configuration is slightly different if you're using docker.

#### Docker config
@@ -49,9 +49,9 @@ clients.

## Server configuration

Support for this feature can be enabled and configured in the
`retention` section of the Synapse configuration file (see the
[sample file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518)).
Support for this feature can be enabled and configured by adding a the
`retention` in the Synapse configuration file (see
[configuration manual](usage/configuration/config_documentation.md#retention)).

To enable support for message retention policies, set the setting
`enabled` in this section to `true`.

@@ -65,8 +65,8 @@ message retention policy configured in its state. This allows server
admins to ensure that messages are never kept indefinitely in a server's
database.

A default policy can be defined as such, in the `retention` section of
the configuration file:
A default policy can be defined as such, by adding the `retention` option in
the configuration file and adding these sub-options:

```yaml
default_policy:

@@ -86,8 +86,8 @@ Purge jobs are the jobs that Synapse runs in the background to purge
expired events from the database. They are only run if support for
message retention policies is enabled in the server's configuration. If
no configuration for purge jobs is configured by the server admin,
Synapse will use a default configuration, which is described in the
[sample configuration file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518).
Synapse will use a default configuration, which is described here in the
[configuration manual](usage/configuration/config_documentation.md#retention).

Some server admins might want a finer control on when events are removed
depending on an event's room's policy. This can be done by setting the

@@ -137,8 +137,8 @@ the server's database.
### Lifetime limits

Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined as such in the
`retention` section of the configuration file:
purging old events in a room. These limits can be defined under the
`retention` option in the configuration file:

```yaml
allowed_lifetime_min: 1d
@@ -45,8 +45,8 @@ as follows:
  maintainer.

To enable the OpenID integration, you should then add a section to the `oidc_providers`
setting in your configuration file (or uncomment one of the existing examples).
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as
setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.

## Sample configs
@@ -143,6 +143,14 @@ to do step 2.

It is safe to at any time kill the port script and restart it.

However, under no circumstances should the SQLite database be `VACUUM`ed between
multiple runs of the script. Doing so can lead to an inconsistent copy of your database
into Postgres.
To avoid accidental error, the script will check that SQLite's `auto_vacuum` mechanism
is disabled, but the script is not able to protect against a manual `VACUUM` operation
performed either by the administrator or by any automated task that the administrator
may have configured.

Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.
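The `auto_vacuum` state that the port script now checks can also be inspected by hand with the `sqlite3` CLI; a quick sketch (the database path is a placeholder):

```sh
# Prints 0 when auto_vacuum is disabled (NONE); 1 (FULL) or 2 (INCREMENTAL)
# mean auto-vacuum is enabled and will trip the port script's new check.
sqlite3 /path/to/homeserver.db "PRAGMA auto_vacuum;"
```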
@@ -66,8 +66,8 @@ in Synapse can be deactivated.

**NOTE**: This has an impact on security and is for testing purposes only!

To deactivate the certificate validation, the following setting must be made in
[homserver.yaml](../usage/configuration/homeserver_sample_config.md).
To deactivate the certificate validation, the following setting must be added to
your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).

```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true
@@ -232,7 +232,9 @@ python -m synapse.app.homeserver \
    --report-stats=[yes|no]
```

... substituting an appropriate value for `--server-name`.
... substituting an appropriate value for `--server-name` and choosing whether
or not to report usage statistics (hostname, Synapse version, uptime, total
users, etc.) to the developers via the `--report-stats` argument.

This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to

@@ -405,11 +407,11 @@ The recommended way to do so is to set up a reverse proxy on port
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

- First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:
- First, under the `listeners` option, add the configuration for the
  TLS-enabled listener like so:

  ```yaml
  listeners:
    - port: 8448
      type: http
      tls: true

@@ -417,9 +419,11 @@ so, you will need to edit `homeserver.yaml`, as follows:
      - names: [client, federation]
  ```

- You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You will need to manage
  provisioning of these certificates yourself.
- You will also need to add the options `tls_certificate_path` and
  `tls_private_key_path`. to your configuration file. You will need to manage provisioning of
  these certificates yourself.
- You can find more information about these options as well as how to configure synapse in the
  [configuration manual](../usage/configuration/config_documentation.md).

If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
@@ -0,0 +1,81 @@
|
||||
# Reporting Anonymised Statistics
|
||||
|
||||
When generating your Synapse configuration file, you are asked whether you
|
||||
would like to report anonymised statistics to Matrix.org. These statistics
|
||||
provide the foundation a glimpse into the number of Synapse homeservers
|
||||
participating in the network, as well as statistics such as the number of
|
||||
rooms being created and messages being sent. This feature is sometimes
|
||||
affectionately called "phone-home" stats. Reporting
|
||||
[is optional](../../configuration/config_documentation.md#report_stats)
|
||||
and the reporting endpoint
|
||||
[can be configured](../../configuration/config_documentation.md#report_stats_endpoint),
|
||||
in case you would like to instead report statistics from a set of homeservers
|
||||
to your own infrastructure.
|
||||
|
||||
This documentation aims to define the statistics available and the
|
||||
homeserver configuration options that exist to tweak it.
|
||||
|
||||
## Available Statistics
|
||||
|
||||
The following statistics are sent to the configured reporting endpoint:
|
||||
|
||||
| Statistic Name | Type | Description |
|
||||
|----------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on MacOS). |
|
||||
| `cpu_average` | int | CPU time in % of a single core (not % of all cores). |
|
||||
| `homeserver` | string | The homeserver's server name. |
|
||||
| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. |
|
||||
| `timestamp` | int | The current time, represented as the number of seconds since the epoch. |
|
||||
| `uptime_seconds` | int | The number of seconds since the homeserver was last started. |
|
||||
| `python_version` | string | The Python version number in use (e.g. "3.7.1"). Taken from `sys.version_info`. |
|
||||
| `total_users` | int | The number of registered users on the homeserver. |
|
||||
| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. |
|
||||
| `daily_user_type_native` | int | The number of native users created in the last 24 hours. |
|
||||
| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. |
|
||||
| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. |
|
||||
| `total_room_count` | int | The total number of rooms present on the homeserver. |
|
||||
| `daily_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 24 hours. |
|
||||
| `monthly_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 30 days. |
|
||||
| `daily_active_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.message` sent in them in the last 24 hours. |
|
||||
| `daily_active_e2ee_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.encrypted` sent in them in the last 24 hours. |
|
||||
| `daily_messages` | int | The number of (state) events with the type `m.room.message` seen in the last 24 hours. |
|
||||
| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
|
||||
| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
|
||||
| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
|
||||
| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
|
||||
| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
|
||||
| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
|
||||
| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
|
||||
| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
|
||||
| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
|
||||
| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
|
||||
| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
|
||||
| `r30v2_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "electron" (case-insensitive) in the user agent string. |
|
||||
| `r30v2_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "mozilla" or "gecko" (case-insensitive) in the user agent string. |
|
||||
| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. |
|
||||
| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. |
|
||||
| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. |
|
||||
| `database_server_version` | string | The version of the database server. Examples are "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. |
|
||||
| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
|
||||
|
||||
|
||||
[^1]: Native matrix users and guests are always counted. If the
|
||||
[`track_puppeted_user_ips`](../../configuration/config_documentation.md#track_puppeted_user_ips)
|
||||
option is set to `true`, "puppeted" users (users that an Application Service has performed
|
||||
[an action on behalf of](https://spec.matrix.org/v1.3/application-service-api/#identity-assertion))
|
||||
will also be counted. Note that an Application Service can "puppet" any user in their
|
||||
[user namespace](https://spec.matrix.org/v1.3/application-service-api/#registration),
|
||||
not only users that the Application Service has created. If this happens, the Application Service
|
||||
will additionally be counted as a user (irrespective of `track_puppeted_user_ips`).
|
||||
|
||||
## Using a Custom Statistics Collection Server
|
||||
|
||||
If statistics reporting is enabled, the endpoint that Synapse sends metrics to is configured by the
|
||||
[`report_stats_endpoint`](../../configuration/config_documentation.md#report_stats_endpoint) config
|
||||
option. By default, statistics are sent to Matrix.org.
|
||||
|
||||
If you would like to set up your own statistics collection server and send metrics there, you may
|
||||
consider using one of the following known implementations:
|
||||
|
||||
* [Matrix.org's Panopticon](https://github.com/matrix-org/panopticon)
|
||||
* [Famedly's Barad-dûr](https://gitlab.com/famedly/company/devops/services/barad-dur)
|
||||
@@ -317,7 +317,7 @@ Example configuration:
|
||||
allow_public_rooms_without_auth: true
|
||||
```
|
||||
---
|
||||
### `allow_public_rooms_without_auth`
|
||||
### `allow_public_rooms_over_federation`
|
||||
|
||||
If set to true, allows any other homeserver to fetch the server's public
|
||||
rooms directory via federation. Defaults to false.
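Following the "Example configuration" convention used elsewhere in this manual, a minimal sketch of enabling the option:

```yaml
allow_public_rooms_over_federation: true
```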
|
||||
@@ -591,7 +591,7 @@ Example configuration:
|
||||
dummy_events_threshold: 5
|
||||
```
|
||||
---
|
||||
Config option `delete_stale_devices_after`
|
||||
### `delete_stale_devices_after`
|
||||
|
||||
An optional duration. If set, Synapse will run a daily background task to log out and
|
||||
delete any device that hasn't been accessed for more than the specified amount of time.
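A hedged example, assuming Synapse's usual duration shorthand (here `1y` for one year):

```yaml
delete_stale_devices_after: 1y
```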
|
||||
@@ -1843,7 +1843,7 @@ Example configuration:
|
||||
turn_shared_secret: "YOUR_SHARED_SECRET"
|
||||
```
|
||||
----
|
||||
Config options: `turn_username` and `turn_password`
|
||||
### `turn_username` and `turn_password`
|
||||
|
||||
The username and password to use if the TURN server needs them and does not use a token.
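For illustration, a sketch with placeholder credentials:

```yaml
turn_username: "TURNSERVER_USERNAME"
turn_password: "TURNSERVER_PASSWORD"
```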
|
||||
|
||||
@@ -2999,7 +2999,7 @@ This setting has the following sub-options:
|
||||
* `localdb_enabled`: Set to false to disable authentication against the local password
|
||||
database. This is ignored if `enabled` is false, and is only useful
|
||||
if you have other `password_providers`. Defaults to true.
|
||||
* `pepper`: Set the value here to a secret random string for extra security. # Uncomment and change to a secret random string for extra security.
|
||||
* `pepper`: Set the value here to a secret random string for extra security.
|
||||
DO NOT CHANGE THIS AFTER INITIAL SETUP!
|
||||
* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc.
|
||||
Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows:
|
||||
@@ -3373,7 +3373,7 @@ alias_creation_rules:
|
||||
action: deny
|
||||
```
|
||||
---
|
||||
Config options: `room_list_publication_rules`
|
||||
### `room_list_publication_rules`
|
||||
|
||||
The `room_list_publication_rules` option controls who can publish and
|
||||
which rooms can be published in the public room list.
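As a hedged sketch, a fully permissive rule set might look like the following; the `user_id`, `alias` and `room_id` keys are assumed to mirror the glob-based fields of `alias_creation_rules` above, so check the manual before relying on them:

```yaml
room_list_publication_rules:
  - user_id: "*"
    alias: "*"
    room_id: "*"
    action: allow
```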
|
||||
|
||||
@@ -9,6 +9,9 @@ a real homeserver.yaml. Instead, if you are starting from scratch, please genera
|
||||
a fresh config using Synapse by following the instructions in
|
||||
[Installation](../../setup/installation.md).
|
||||
|
||||
Documentation for all configuration options can be found in the
|
||||
[Configuration Manual](./config_documentation.md).
|
||||
|
||||
```yaml
|
||||
{{#include ../../sample_config.yaml}}
|
||||
```
|
||||
|
||||
@@ -4,5 +4,5 @@ Synapse supports authenticating users via the [Central Authentication
|
||||
Service protocol](https://en.wikipedia.org/wiki/Central_Authentication_Service)
|
||||
(CAS) natively.
|
||||
|
||||
Please see the `cas_config` and `sso` sections of the [Synapse configuration
|
||||
file](../../../configuration/homeserver_sample_config.md) for more details.
|
||||
Please see the [cas_config](../../../configuration/config_documentation.md#cas_config) and [sso](../../../configuration/config_documentation.md#sso)
|
||||
sections of the configuration manual for more details.
|
||||
8
mypy.ini
@@ -56,7 +56,6 @@ exclude = (?x)
|
||||
|tests/server.py
|
||||
|tests/server_notices/test_resource_limits_server_notices.py
|
||||
|tests/test_metrics.py
|
||||
|tests/test_server.py
|
||||
|tests/test_state.py
|
||||
|tests/test_terms_auth.py
|
||||
|tests/util/caches/test_cached_call.py
|
||||
@@ -74,7 +73,6 @@ exclude = (?x)
|
||||
|tests/util/test_lrucache.py
|
||||
|tests/util/test_rwlock.py
|
||||
|tests/util/test_wheel_timer.py
|
||||
|tests/utils.py
|
||||
)$
|
||||
|
||||
[mypy-synapse.federation.transport.client]
|
||||
@@ -89,9 +87,6 @@ disallow_untyped_defs = False
|
||||
[mypy-synapse.logging.opentracing]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.logging.scopecontextmanager]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.metrics._reactor_metrics]
|
||||
disallow_untyped_defs = False
|
||||
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
|
||||
@@ -131,6 +126,9 @@ disallow_untyped_defs = True
|
||||
[mypy-tests.federation.transport.test_client]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-tests.utils]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
|
||||
;; Dependencies without annotations
|
||||
;; Before ignoring a module, check to see if type stubs are available.
|
||||
|
||||
133
poetry.lock
generated
133
poetry.lock
generated
@@ -502,7 +502,7 @@ pyasn1 = ">=0.4.6"
|
||||
|
||||
[[package]]
|
||||
name = "lxml"
|
||||
version = "4.8.0"
|
||||
version = "4.9.1"
|
||||
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
|
||||
category = "main"
|
||||
optional = true
|
||||
@@ -1937,67 +1937,76 @@ ldap3 = [
|
||||
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
|
||||
]
|
||||
lxml = [
|
||||
{file = "lxml-4.8.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b92d40121dcbd74831b690a75533da703750f7041b4bf951befc657c37e5695a"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-win32.whl", hash = "sha256:e01f9531ba5420838c801c21c1b0f45dbc9607cb22ea2cf132844453bec863a5"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-win_amd64.whl", hash = "sha256:6259b511b0f2527e6d55ad87acc1c07b3cbffc3d5e050d7e7bcfa151b8202df9"},
|
||||
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1010042bfcac2b2dc6098260a2ed022968dbdfaf285fc65a3acf8e4eb1ffd1bc"},
|
||||
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fa56bb08b3dd8eac3a8c5b7d075c94e74f755fd9d8a04543ae8d37b1612dd170"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:31ba2cbc64516dcdd6c24418daa7abff989ddf3ba6d3ea6f6ce6f2ed6e754ec9"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:31499847fc5f73ee17dbe1b8e24c6dafc4e8d5b48803d17d22988976b0171f03"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5f7d7d9afc7b293147e2d506a4596641d60181a35279ef3aa5778d0d9d9123fe"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a3c5f1a719aa11866ffc530d54ad965063a8cbbecae6515acbd5f0fae8f48eaa"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6268e27873a3d191849204d00d03f65c0e343b3bcb518a6eaae05677c95621d1"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-win32.whl", hash = "sha256:330bff92c26d4aee79c5bc4d9967858bdbe73fdbdbacb5daf623a03a914fe05b"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2582b238e1658c4061ebe1b4df53c435190d22457642377fd0cb30685cdfb76"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2bfc7e2a0601b475477c954bf167dee6d0f55cb167e3f3e7cefad906e7759f6"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a1547ff4b8a833511eeaceacbcd17b043214fcdb385148f9c1bc5556ca9623e2"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-win32.whl", hash = "sha256:a9f1c3489736ff8e1c7652e9dc39f80cff820f23624f23d9eab6e122ac99b150"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-win_amd64.whl", hash = "sha256:530f278849031b0eb12f46cca0e5db01cfe5177ab13bd6878c6e739319bae654"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:078306d19a33920004addeb5f4630781aaeabb6a8d01398045fcde085091a169"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:86545e351e879d0b72b620db6a3b96346921fa87b3d366d6c074e5a9a0b8dadb"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24f5c5ae618395ed871b3d8ebfcbb36e3f1091fd847bf54c4de623f9107942f3"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:bbab6faf6568484707acc052f4dfc3802bdb0cafe079383fbaa23f1cdae9ecd4"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7993232bd4044392c47779a3c7e8889fea6883be46281d45a81451acfd704d7e"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d6483b1229470e1d8835e52e0ff3c6973b9b97b24cd1c116dca90b57a2cc613"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ad4332a532e2d5acb231a2e5d33f943750091ee435daffca3fec0a53224e7e33"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-win32.whl", hash = "sha256:db3535733f59e5605a88a706824dfcb9bd06725e709ecb017e165fc1d6e7d429"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5f148b0c6133fb928503cfcdfdba395010f997aa44bcf6474fcdd0c5398d9b63"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:8a31f24e2a0b6317f33aafbb2f0895c0bce772980ae60c2c640d82caac49628a"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:719544565c2937c21a6f76d520e6e52b726d132815adb3447ccffbe9f44203c4"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:c0b88ed1ae66777a798dc54f627e32d3b81c8009967c63993c450ee4cbcbec15"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fa9b7c450be85bfc6cd39f6df8c5b8cbd76b5d6fc1f69efec80203f9894b885f"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9f84ed9f4d50b74fbc77298ee5c870f67cb7e91dcdc1a6915cb1ff6a317476c"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1d650812b52d98679ed6c6b3b55cbb8fe5a5460a0aef29aeb08dc0b44577df85"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:80bbaddf2baab7e6de4bc47405e34948e694a9efe0861c61cdc23aa774fcb141"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-win32.whl", hash = "sha256:6f7b82934c08e28a2d537d870293236b1000d94d0b4583825ab9649aef7ddf63"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e1fd7d2fe11f1cb63d3336d147c852f6d07de0d0020d704c6031b46a30b02ca8"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:5045ee1ccd45a89c4daec1160217d363fcd23811e26734688007c26f28c9e9e7"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0c1978ff1fd81ed9dcbba4f91cf09faf1f8082c9d72eb122e92294716c605428"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cbf2ff155b19dc4d4100f7442f6a697938bf4493f8d3b0c51d45568d5666b5"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ce13d6291a5f47c1c8dbd375baa78551053bc6b5e5c0e9bb8e39c0a8359fd52f"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11527dc23d5ef44d76fef11213215c34f36af1608074561fcc561d983aeb870"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60d2f60bd5a2a979df28ab309352cdcf8181bda0cca4529769a945f09aba06f9"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:62f93eac69ec0f4be98d1b96f4d6b964855b8255c345c17ff12c20b93f247b68"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-win32.whl", hash = "sha256:20b8a746a026017acf07da39fdb10aa80ad9877046c9182442bf80c84a1c4696"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:891dc8f522d7059ff0024cd3ae79fd224752676447f9c678f2a5c14b84d9a939"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b6fc2e2fb6f532cf48b5fed57567ef286addcef38c28874458a41b7837a57807"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:74eb65ec61e3c7c019d7169387d1b6ffcfea1b9ec5894d116a9a903636e4a0b1"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:627e79894770783c129cc5e89b947e52aa26e8e0557c7e205368a809da4b7939"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:545bd39c9481f2e3f2727c78c169425efbfb3fbba6e7db4f46a80ebb249819ca"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a58d0b12f5053e270510bf12f753a76aaf3d74c453c00942ed7d2c804ca845c"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec4b4e75fc68da9dc0ed73dcdb431c25c57775383fec325d23a770a64e7ebc87"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5804e04feb4e61babf3911c2a974a5b86f66ee227cc5006230b00ac6d285b3a9"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-win32.whl", hash = "sha256:aa0cf4922da7a3c905d000b35065df6184c0dc1d866dd3b86fd961905bbad2ea"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:dd10383f1d6b7edf247d0960a3db274c07e96cf3a3fc7c41c8448f93eac3fb1c"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2403a6d6fb61c285969b71f4a3527873fe93fd0abe0832d858a17fe68c8fa507"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:986b7a96228c9b4942ec420eff37556c5777bfba6758edcb95421e4a614b57f9"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6fe4ef4402df0250b75ba876c3795510d782def5c1e63890bde02d622570d39e"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:f10ce66fcdeb3543df51d423ede7e238be98412232fca5daec3e54bcd16b8da0"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93"},
|
||||
{file = "lxml-4.8.0.tar.gz", hash = "sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"},
|
||||
{file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"},
|
||||
{file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"},
|
||||
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"},
|
||||
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"},
|
||||
{file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"},
|
||||
]
|
||||
markupsafe = [
|
||||
{file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},
|
||||
|
||||
@@ -14,12 +14,18 @@
|
||||
# By default Synapse is run in monolith mode. This can be overridden by
|
||||
# setting the WORKERS environment variable.
|
||||
#
|
||||
# A regular expression of test method names can be supplied as the first
|
||||
# argument to the script. Complement will then only run those tests. If
|
||||
# no regex is supplied, all tests are run. For example;
|
||||
# You can optionally give a "-f" argument (for "fast") before any other arguments to skip
|
||||
# rebuilding the docker images, if you just want to rerun the tests.
|
||||
#
|
||||
# Remaining commandline arguments are passed through to `go test`. For example,
|
||||
# you can supply a regular expression of test method names via the "-run"
|
||||
# argument:
|
||||
#
|
||||
# ./complement.sh -run "TestOutboundFederation(Profile|Send)"
|
||||
#
|
||||
# Specifying TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 will cause `poetry export`
|
||||
# to not emit any hashes when building the Docker image. This then means that
|
||||
# you can use 'unverifiable' sources such as git repositories as dependencies.
|
||||
|
||||
# Exit if a line returns a non-zero exit code
|
||||
set -e
|
||||
@@ -32,6 +38,47 @@ echo_if_github() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Helper to print out the usage instructions
|
||||
usage() {
|
||||
cat >&2 <<EOF
|
||||
Usage: $0 [-f] <go test arguments>...
|
||||
Run the complement test suite on Synapse.
|
||||
|
||||
-f, --fast
|
||||
Skip rebuilding the docker images, and just use the most recent
|
||||
'complement-synapse:latest' image.
|
||||
Conflicts with --build-only.
|
||||
|
||||
--build-only
|
||||
Only build the Docker images. Don't actually run Complement.
|
||||
Conflicts with -f/--fast.
|
||||
|
||||
For help on arguments to 'go test', run 'go help testflag'.
|
||||
EOF
|
||||
}
|
||||
|
||||
# parse our arguments
|
||||
skip_docker_build=""
|
||||
skip_complement_run=""
|
||||
while [ $# -ge 1 ]; do
|
||||
arg=$1
|
||||
case "$arg" in
|
||||
"-h")
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
"-f"|"--fast")
|
||||
skip_docker_build=1
|
||||
;;
|
||||
"--build-only")
|
||||
skip_complement_run=1
|
||||
;;
|
||||
*)
|
||||
# unknown arg: presumably an argument to gotest. break the loop.
|
||||
break
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# enable buildkit for the docker builds
|
||||
export DOCKER_BUILDKIT=1
|
||||
@@ -49,21 +96,30 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
|
||||
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
|
||||
fi
|
||||
|
||||
# Build the base Synapse image from the local checkout
|
||||
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
|
||||
docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
|
||||
echo_if_github "::endgroup::"
|
||||
if [ -z "$skip_docker_build" ]; then
|
||||
# Build the base Synapse image from the local checkout
|
||||
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
|
||||
docker build -t matrixdotorg/synapse \
|
||||
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
|
||||
-f "docker/Dockerfile" .
|
||||
echo_if_github "::endgroup::"
|
||||
|
||||
# Build the workers docker image (from the base Synapse image we just built).
|
||||
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
|
||||
docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
|
||||
echo_if_github "::endgroup::"
|
||||
# Build the workers docker image (from the base Synapse image we just built).
|
||||
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
|
||||
docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
|
||||
echo_if_github "::endgroup::"
|
||||
|
||||
# Build the unified Complement image (from the worker Synapse image we just built).
|
||||
echo_if_github "::group::Build Docker image: complement/Dockerfile"
|
||||
docker build -t complement-synapse \
|
||||
-f "docker/complement/Dockerfile" "docker/complement"
|
||||
echo_if_github "::endgroup::"
|
||||
# Build the unified Complement image (from the worker Synapse image we just built).
|
||||
echo_if_github "::group::Build Docker image: complement/Dockerfile"
|
||||
docker build -t complement-synapse \
|
||||
-f "docker/complement/Dockerfile" "docker/complement"
|
||||
echo_if_github "::endgroup::"
|
||||
fi
|
||||
|
||||
if [ -n "$skip_complement_run" ]; then
|
||||
echo "Skipping Complement run as requested."
|
||||
exit
|
||||
fi
|
||||
|
||||
export COMPLEMENT_BASE_IMAGE=complement-synapse
|
||||
|
||||
@@ -104,6 +160,18 @@ else
|
||||
test_tags="$test_tags,faster_joins"
|
||||
fi
|
||||
|
||||
|
||||
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
|
||||
# Set the log level to what is desired
|
||||
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
|
||||
|
||||
# Allow logging sensitive things (currently SQL queries & parameters).
|
||||
# (This won't have any effect if we're not logging at DEBUG level overall.)
|
||||
# Since this is just a test suite, this is fine and won't reveal anyone's
|
||||
# personal information
|
||||
export PASS_SYNAPSE_LOG_SENSITIVE=1
|
||||
fi
|
||||
|
||||
# Run the tests!
|
||||
echo "Images built; running complement"
|
||||
cd "$COMPLEMENT_DIR"
|
||||
|
||||
@@ -268,6 +268,9 @@ class MockHomeserver:
|
||||
def get_instance_name(self) -> str:
|
||||
return "master"
|
||||
|
||||
def should_send_federation(self) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
class Porter:
|
||||
def __init__(
|
||||
@@ -618,6 +621,25 @@ class Porter:
|
||||
self.postgres_store.db_pool.updates.has_completed_background_updates()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _is_sqlite_autovacuum_enabled(txn: LoggingTransaction) -> bool:
|
||||
"""
|
||||
Returns true if auto_vacuum is enabled in SQLite.
|
||||
https://www.sqlite.org/pragma.html#pragma_auto_vacuum
|
||||
|
||||
Vacuuming changes the rowids on rows in the database.
|
||||
Auto-vacuuming is therefore dangerous when used in conjunction with this script.
|
||||
|
||||
Note that the auto_vacuum setting can't be changed without performing
|
||||
a VACUUM after trying to change the pragma.
|
||||
"""
|
||||
txn.execute("PRAGMA auto_vacuum")
|
||||
row = txn.fetchone()
|
||||
assert row is not None, "`PRAGMA auto_vacuum` did not give a row."
|
||||
(autovacuum_setting,) = row
|
||||
# 0 means off. 1 means full. 2 means incremental.
|
||||
return autovacuum_setting != 0
|
||||
|
||||
async def run(self) -> None:
|
||||
"""Ports the SQLite database to a PostgreSQL database.
|
||||
|
||||
@@ -634,6 +656,21 @@ class Porter:
|
||||
allow_outdated_version=True,
|
||||
)
|
||||
|
||||
# For safety, ensure auto_vacuums are disabled.
|
||||
if await self.sqlite_store.db_pool.runInteraction(
|
||||
"is_sqlite_autovacuum_enabled", self._is_sqlite_autovacuum_enabled
|
||||
):
|
||||
end_error = (
|
||||
"auto_vacuum is enabled in the SQLite database."
|
||||
" (This is not the default configuration.)\n"
|
||||
" This script relies on rowids being consistent and must not"
|
||||
" be used if the database could be vacuumed between re-runs.\n"
|
||||
" To disable auto_vacuum, you need to stop Synapse and run the following SQL:\n"
|
||||
" PRAGMA auto_vacuum=off;\n"
|
||||
" VACUUM;"
|
||||
)
|
||||
return
|
||||
|
||||
# Check if all background updates are done, abort if not.
|
||||
updates_complete = (
|
||||
await self.sqlite_store.db_pool.updates.has_completed_background_updates()
|
||||
|
||||
@@ -259,3 +259,13 @@ class ReceiptTypes:
|
||||
READ: Final = "m.read"
|
||||
READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
|
||||
FULLY_READ: Final = "m.fully_read"
|
||||
|
||||
|
||||
class PublicRoomsFilterFields:
|
||||
"""Fields in the search filter for `/publicRooms` that we understand.
|
||||
|
||||
As defined in https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3publicrooms
|
||||
"""
|
||||
|
||||
GENERIC_SEARCH_TERM: Final = "generic_search_term"
|
||||
ROOM_TYPES: Final = "org.matrix.msc3827.room_types"
|
||||
|
||||
@@ -106,7 +106,9 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
|
||||
def start_worker_reactor(
|
||||
appname: str,
|
||||
config: HomeServerConfig,
|
||||
run_command: Callable[[], None] = reactor.run,
|
||||
# Use a lambda to avoid binding to a given reactor at import time.
|
||||
# (needed when synapse.app.complement_fork_starter is being used)
|
||||
run_command: Callable[[], None] = lambda: reactor.run(),
|
||||
) -> None:
|
||||
"""Run the reactor in the main process
|
||||
|
||||
@@ -141,7 +143,9 @@ def start_reactor(
|
||||
daemonize: bool,
|
||||
print_pidfile: bool,
|
||||
logger: logging.Logger,
|
||||
run_command: Callable[[], None] = reactor.run,
|
||||
# Use a lambda to avoid binding to a given reactor at import time.
|
||||
# (needed when synapse.app.complement_fork_starter is being used)
|
||||
run_command: Callable[[], None] = lambda: reactor.run(),
|
||||
) -> None:
|
||||
"""Run the reactor in the main process
|
||||
|
||||
|
||||
190
synapse/app/complement_fork_starter.py
Normal file
@@ -0,0 +1,190 @@
|
||||
# Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# ## What this script does
|
||||
#
|
||||
# This script spawns multiple workers, whilst only going through the code loading
|
||||
# process once. The net effect is that start-up time for a swarm of workers is
|
||||
# reduced, particularly in CPU-constrained environments.
|
||||
#
|
||||
# Before the workers are spawned, the database is prepared in order to avoid the
|
||||
# workers racing.
|
||||
#
|
||||
# ## Stability
|
||||
#
|
||||
# This script is only intended for use within the Synapse images for the
|
||||
# Complement test suite.
|
||||
# There are currently no stability guarantees whatsoever; especially not about:
|
||||
# - whether it will continue to exist in future versions;
|
||||
# - the format of its command-line arguments; or
|
||||
# - any details about its behaviour or principles of operation.
|
||||
#
|
||||
# ## Usage
|
||||
#
|
||||
# The first argument should be the path to the database configuration, used to
|
||||
# set up the database. The rest of the arguments are used as follows:
|
||||
# Each worker is specified as an argument group (each argument group is
|
||||
# separated by '--').
|
||||
# The first argument in each argument group is the Python module name of the application
|
||||
# to start. Further arguments are then passed to that module as-is.
|
||||
#
|
||||
# ## Example
|
||||
#
|
||||
# python -m synapse.app.complement_fork_starter path_to_db_config.yaml \
|
||||
# synapse.app.homeserver [args..] -- \
|
||||
# synapse.app.generic_worker [args..] -- \
|
||||
# ...
|
||||
# synapse.app.generic_worker [args..]
|
||||
#
|
||||
import argparse
|
||||
import importlib
|
||||
import itertools
|
||||
import multiprocessing
|
||||
import sys
|
||||
from typing import Any, Callable, List
|
||||
|
||||
from twisted.internet.main import installReactor
|
||||
|
||||
|
||||
class ProxiedReactor:
|
||||
"""
|
||||
Twisted tracks the 'installed' reactor as a global variable.
|
||||
(Actually, it does some module trickery, but the effect is similar.)
|
||||
|
||||
The default EpollReactor is buggy if it's created before a process is
|
||||
forked, then used in the child.
|
||||
See https://twistedmatrix.com/trac/ticket/4759#comment:17.
|
||||
|
||||
However, importing certain Twisted modules will automatically create and
|
||||
install a reactor if one hasn't already been installed.
|
||||
It's not normally possible to re-install a reactor.
|
||||
|
||||
Given the goal of launching workers with fork() to only import the code once,
|
||||
this presents a conflict.
|
||||
Our workaround is to 'install' this ProxiedReactor, which prevents Twisted
|
||||
from creating and installing one, but which lets us replace the actual reactor
|
||||
in use later on.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.___reactor_target: Any = None
|
||||
|
||||
def _install_real_reactor(self, new_reactor: Any) -> None:
|
||||
"""
|
||||
Install a real reactor for this ProxiedReactor to forward lookups onto.
|
||||
|
||||
This method is specific to our ProxiedReactor and should not clash with
|
||||
any names used on an actual Twisted reactor.
|
||||
"""
|
||||
self.___reactor_target = new_reactor
|
||||
|
||||
def __getattr__(self, attr_name: str) -> Any:
|
||||
return getattr(self.___reactor_target, attr_name)
|
||||
|
||||
|
||||
def _worker_entrypoint(
|
||||
func: Callable[[], None], proxy_reactor: ProxiedReactor, args: List[str]
|
||||
) -> None:
|
||||
"""
|
||||
Entrypoint for a forked worker process.
|
||||
|
||||
We just need to set up the command-line arguments, create our real reactor
|
||||
and then kick off the worker's main() function.
|
||||
"""
|
||||
|
||||
sys.argv = args
|
||||
|
||||
from twisted.internet.epollreactor import EPollReactor
|
||||
|
||||
proxy_reactor._install_real_reactor(EPollReactor())
|
||||
func()
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""
|
||||
Entrypoint for the forking launcher.
|
||||
"""
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("db_config", help="Path to database config file")
|
||||
parser.add_argument(
|
||||
"args",
|
||||
nargs="...",
|
||||
help="Argument groups separated by `--`. "
|
||||
"The first argument of each group is a Synapse app name. "
|
||||
"Subsequent arguments are passed through.",
|
||||
)
|
||||
ns = parser.parse_args()
|
||||
|
||||
# Split up the subsequent arguments into each workers' arguments;
|
||||
# `--` is our delimiter of choice.
|
||||
args_by_worker: List[List[str]] = [
|
||||
list(args)
|
||||
for cond, args in itertools.groupby(ns.args, lambda ele: ele != "--")
|
||||
if cond and args
|
||||
]
|
||||
|
||||
# Prevent Twisted from installing a shared reactor that all the workers will
|
||||
# inherit when we fork(), by installing our own beforehand.
|
||||
proxy_reactor = ProxiedReactor()
|
||||
installReactor(proxy_reactor)
|
||||
|
||||
# Import the entrypoints for all the workers.
|
||||
worker_functions = []
|
||||
for worker_args in args_by_worker:
|
||||
worker_module = importlib.import_module(worker_args[0])
|
||||
worker_functions.append(worker_module.main)
|
||||
|
||||
# We need to prepare the database first as otherwise all the workers will
|
||||
# try to create a schema version table and some will crash out.
|
||||
from synapse._scripts import update_synapse_database
|
||||
|
||||
update_proc = multiprocessing.Process(
|
||||
target=_worker_entrypoint,
|
||||
args=(
|
||||
update_synapse_database.main,
|
||||
proxy_reactor,
|
||||
[
|
||||
"update_synapse_database",
|
||||
"--database-config",
|
||||
ns.db_config,
|
||||
"--run-background-updates",
|
||||
],
|
||||
),
|
||||
)
|
||||
print("===== PREPARING DATABASE =====", file=sys.stderr)
|
||||
update_proc.start()
|
||||
update_proc.join()
|
||||
print("===== PREPARED DATABASE =====", file=sys.stderr)
|
||||
|
||||
# At this point, we've imported all the main entrypoints for all the workers.
|
||||
# Now we basically just fork() out to create the workers we need.
|
||||
# Because we're using fork(), all the workers get a clone of this launcher's
|
||||
# memory space and don't need to repeat the work of loading the code!
|
||||
# Instead of using fork() directly, we use the multiprocessing library,
|
||||
# which uses fork() on Unix platforms.
|
||||
processes = []
|
||||
for (func, worker_args) in zip(worker_functions, args_by_worker):
|
||||
process = multiprocessing.Process(
|
||||
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
|
||||
)
|
||||
process.start()
|
||||
processes.append(process)
|
||||
|
||||
# Be a good parent and wait for our children to die before exiting.
|
||||
for process in processes:
|
||||
process.join()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -21,7 +21,7 @@ from typing import Any, Callable, Dict, Optional
|
||||
import attr
|
||||
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import DependencyException, check_requirements
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
@@ -159,12 +159,7 @@ class CacheConfig(Config):
|
||||
|
||||
self.track_memory_usage = cache_config.get("track_memory_usage", False)
|
||||
if self.track_memory_usage:
|
||||
try:
|
||||
check_requirements("cache_memory")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
check_requirements("cache_memory")
|
||||
|
||||
expire_caches = cache_config.get("expire_caches", True)
|
||||
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
|
||||
|
||||
@@ -145,7 +145,7 @@ class EmailConfig(Config):
|
||||
raise ConfigError(
|
||||
'The config option "trust_identity_server_for_password_resets" '
|
||||
'has been replaced by "account_threepid_delegate". '
|
||||
"Please consult the sample config at docs/sample_config.yaml for "
|
||||
"Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
|
||||
"details and update your config file."
|
||||
)
|
||||
|
||||
|
||||
@@ -87,3 +87,6 @@ class ExperimentalConfig(Config):
|
||||
|
||||
# MSC3715: dir param on /relations.
|
||||
self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
|
||||
|
||||
# MSC3827: Filtering of /publicRooms by room type
|
||||
self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False)
|
||||
|
||||
@@ -15,14 +15,9 @@
|
||||
from typing import Any
|
||||
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
MISSING_AUTHLIB = """Missing authlib library. This is required for jwt login.
|
||||
|
||||
Install by running:
|
||||
pip install synapse[jwt]
|
||||
"""
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class JWTConfig(Config):
|
||||
@@ -41,13 +36,7 @@ class JWTConfig(Config):
|
||||
# that the claims exist on the JWT.
|
||||
self.jwt_issuer = jwt_config.get("issuer")
|
||||
self.jwt_audiences = jwt_config.get("audiences")
|
||||
|
||||
try:
|
||||
from authlib.jose import JsonWebToken
|
||||
|
||||
JsonWebToken # To stop unused lint.
|
||||
except ImportError:
|
||||
raise ConfigError(MISSING_AUTHLIB)
|
||||
check_requirements("jwt")
|
||||
else:
|
||||
self.jwt_enabled = False
|
||||
self.jwt_secret = None
|
||||
|
||||
@@ -18,7 +18,7 @@ from typing import Any, Optional
|
||||
import attr
|
||||
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import DependencyException, check_requirements
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
@@ -57,12 +57,7 @@ class MetricsConfig(Config):
|
||||
|
||||
self.sentry_enabled = "sentry" in config
|
||||
if self.sentry_enabled:
|
||||
try:
|
||||
check_requirements("sentry")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
check_requirements("sentry")
|
||||
|
||||
self.sentry_dsn = config["sentry"].get("dsn")
|
||||
if not self.sentry_dsn:
|
||||
|
||||
@@ -24,7 +24,7 @@ from synapse.types import JsonDict
|
||||
from synapse.util.module_loader import load_module
|
||||
from synapse.util.stringutils import parse_and_validate_mxc_uri
|
||||
|
||||
from ..util.check_dependencies import DependencyException, check_requirements
|
||||
from ..util.check_dependencies import check_requirements
|
||||
from ._base import Config, ConfigError, read_file
|
||||
|
||||
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
|
||||
@@ -41,12 +41,7 @@ class OIDCConfig(Config):
|
||||
if not self.oidc_providers:
|
||||
return
|
||||
|
||||
try:
|
||||
check_requirements("oidc")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
) from e
|
||||
check_requirements("oidc")
|
||||
|
||||
# check we don't have any duplicate idp_ids now. (The SSO handler will also
|
||||
# check for duplicates when the REST listeners get registered, but that happens
|
||||
@@ -146,7 +141,6 @@ OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = {
|
||||
"allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}]
|
||||
}
|
||||
|
||||
|
||||
# the `oidc_providers` list can either be None (as it is in the default config), or
|
||||
# a list of provider configs, each of which requires an explicit ID and name.
|
||||
OIDC_PROVIDER_LIST_SCHEMA = {
|
||||
|
||||
@@ -136,6 +136,11 @@ class RatelimitConfig(Config):
|
||||
defaults={"per_second": 0.003, "burst_count": 5},
|
||||
)
|
||||
|
||||
self.rc_invites_per_issuer = RateLimitConfig(
|
||||
config.get("rc_invites", {}).get("per_issuer", {}),
|
||||
defaults={"per_second": 0.3, "burst_count": 10},
|
||||
)
|
||||
|
||||
self.rc_third_party_invite = RateLimitConfig(
|
||||
config.get("rc_third_party_invite", {}),
|
||||
defaults={
|
||||
|
||||
@@ -21,7 +21,7 @@ import attr
|
||||
|
||||
from synapse.config.server import generate_ip_set
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import DependencyException, check_requirements
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
from synapse.util.module_loader import load_module
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
@@ -184,13 +184,7 @@ class ContentRepositoryConfig(Config):
|
||||
)
|
||||
self.url_preview_enabled = config.get("url_preview_enabled", False)
|
||||
if self.url_preview_enabled:
|
||||
try:
|
||||
check_requirements("url_preview")
|
||||
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
check_requirements("url_preview")
|
||||
|
||||
proxy_env = getproxies_environment()
|
||||
if "url_preview_ip_range_blacklist" not in config:
|
||||
|
||||
@@ -18,7 +18,7 @@ from typing import Any, List, Set
|
||||
|
||||
from synapse.config.sso import SsoAttributeRequirement
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import DependencyException, check_requirements
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
from synapse.util.module_loader import load_module, load_python_module
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
@@ -76,12 +76,7 @@ class SAML2Config(Config):
|
||||
if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
|
||||
return
|
||||
|
||||
try:
|
||||
check_requirements("saml2")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
check_requirements("saml2")
|
||||
|
||||
self.saml2_enabled = True
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
from typing import Any, List, Set
|
||||
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.check_dependencies import DependencyException, check_requirements
|
||||
from synapse.util.check_dependencies import check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
@@ -40,12 +40,7 @@ class TracerConfig(Config):
|
||||
if not self.opentracer_enabled:
|
||||
return
|
||||
|
||||
try:
|
||||
check_requirements("opentracing")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
check_requirements("opentracing")
|
||||
|
||||
# The tracer is enabled so sanitize the config
|
||||
|
||||
|
||||
@@ -464,14 +464,7 @@ class ThirdPartyEventRules:
         Returns:
             A dict mapping (event type, state key) to state event.
         """
-        state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
-        room_state_events = await self.store.get_events(state_ids.values())
-
-        state_events = {}
-        for key, event_id in state_ids.items():
-            state_events[key] = room_state_events[event_id]
-
-        return state_events
+        return await self._storage_controllers.state.get_current_state(room_id)

     async def on_profile_update(
         self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool

@@ -67,6 +67,7 @@ from synapse.replication.http.federation import (
     ReplicationFederationSendEduRestServlet,
     ReplicationGetQueryRestServlet,
 )
+from synapse.storage.databases.main.events import PartialStateConflictError
 from synapse.storage.databases.main.lock import Lock
 from synapse.types import JsonDict, StateMap, get_domain_from_id
 from synapse.util import json_decoder, unwrapFirstError
@@ -882,9 +883,20 @@ class FederationServer(FederationBase):
             logger.warning("%s", errmsg)
             raise SynapseError(403, errmsg, Codes.FORBIDDEN)

-        return await self._federation_event_handler.on_send_membership_event(
-            origin, event
-        )
+        try:
+            return await self._federation_event_handler.on_send_membership_event(
+                origin, event
+            )
+        except PartialStateConflictError:
+            # The room was un-partial stated while we were persisting the event.
+            # Try once more, with full state this time.
+            logger.info(
+                "Room %s was un-partial stated during `on_send_membership_event`, trying again.",
+                room_id,
+            )
+            return await self._federation_event_handler.on_send_membership_event(
+                origin, event
+            )

     async def on_event_auth(
         self, origin: str, room_id: str, event_id: str

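PartialStateConflictError itself is imported from synapse.storage.databases.main.events and its definition is not shown in this diff. From the way it is used here (replication endpoints answer with HTTP 409 when it is raised, and callers re-create it from a SynapseError whose code is HTTPStatus.CONFLICT), it behaves roughly like the sketch below; the msg/errcode attribute names come from the handler hunks further down, while the concrete strings are assumptions.

from http import HTTPStatus

class PartialStateConflictError(Exception):
    # Sketch only: raised when an event was computed against partial room state
    # but the room finished un-partial stating before the event was persisted.
    # Callers in this diff either retry exactly once or turn it into a 429.
    def __init__(self) -> None:
        self.code = HTTPStatus.CONFLICT  # surfaced as a 409 over replication HTTP
        self.msg = "room was un-partial stated during event persistence"  # illustrative text
        self.errcode = "M_UNKNOWN"  # illustrative errcode
        super().__init__(self.msg)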
@@ -45,6 +45,7 @@ from synapse.api.errors import (
|
||||
FederationDeniedError,
|
||||
FederationError,
|
||||
HttpResponseException,
|
||||
LimitExceededError,
|
||||
NotFoundError,
|
||||
RequestSendFailed,
|
||||
SynapseError,
|
||||
@@ -64,6 +65,7 @@ from synapse.replication.http.federation import (
|
||||
ReplicationCleanRoomRestServlet,
|
||||
ReplicationStoreRoomOnOutlierMembershipRestServlet,
|
||||
)
|
||||
from synapse.storage.databases.main.events import PartialStateConflictError
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import JsonDict, StateMap, get_domain_from_id
|
||||
@@ -549,15 +551,29 @@ class FederationHandler:
|
||||
# https://github.com/matrix-org/synapse/issues/12998
|
||||
await self.store.store_partial_state_room(room_id, ret.servers_in_room)
|
||||
|
||||
max_stream_id = await self._federation_event_handler.process_remote_join(
|
||||
origin,
|
||||
room_id,
|
||||
auth_chain,
|
||||
state,
|
||||
event,
|
||||
room_version_obj,
|
||||
partial_state=ret.partial_state,
|
||||
)
|
||||
try:
|
||||
max_stream_id = (
|
||||
await self._federation_event_handler.process_remote_join(
|
||||
origin,
|
||||
room_id,
|
||||
auth_chain,
|
||||
state,
|
||||
event,
|
||||
room_version_obj,
|
||||
partial_state=ret.partial_state,
|
||||
)
|
||||
)
|
||||
except PartialStateConflictError as e:
|
||||
# The homeserver was already in the room and it is no longer partial
|
||||
# stated. We ought to be doing a local join instead. Turn the error into
|
||||
# a 429, as a hint to the client to try again.
|
||||
# TODO(faster_joins): `_should_perform_remote_join` suggests that we may
|
||||
# do a remote join for restricted rooms even if we have full state.
|
||||
logger.error(
|
||||
"Room %s was un-partial stated while processing remote join.",
|
||||
room_id,
|
||||
)
|
||||
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
|
||||
|
||||
if ret.partial_state:
|
||||
# Kick off the process of asynchronously fetching the state for this
|
||||
@@ -1543,14 +1559,9 @@ class FederationHandler:
|
||||
# all the events are updated, so we can update current state and
|
||||
# clear the lazy-loading flag.
|
||||
logger.info("Updating current state for %s", room_id)
|
||||
# TODO(faster_joins): support workers
|
||||
# TODO(faster_joins): notify workers in notify_room_un_partial_stated
|
||||
# https://github.com/matrix-org/synapse/issues/12994
|
||||
assert (
|
||||
self._storage_controllers.persistence is not None
|
||||
), "worker-mode deployments not currently supported here"
|
||||
await self._storage_controllers.persistence.update_current_state(
|
||||
room_id
|
||||
)
|
||||
await self.state_handler.update_current_state(room_id)
|
||||
|
||||
logger.info("Clearing partial-state flag for %s", room_id)
|
||||
success = await self.store.clear_partial_state_room(room_id)
|
||||
@@ -1567,11 +1578,6 @@ class FederationHandler:
|
||||
|
||||
# we raced against more events arriving with partial state. Go round
|
||||
# the loop again. We've already logged a warning, so no need for more.
|
||||
# TODO(faster_joins): there is still a race here, whereby incoming events which raced
|
||||
# with us will fail to be persisted after the call to `clear_partial_state_room` due to
|
||||
# having partial state.
|
||||
# https://github.com/matrix-org/synapse/issues/12988
|
||||
#
|
||||
continue
|
||||
|
||||
events = await self.store.get_events_as_list(
|
||||
|
||||
@@ -64,6 +64,7 @@ from synapse.replication.http.federation import (
|
||||
ReplicationFederationSendEventsRestServlet,
|
||||
)
|
||||
from synapse.state import StateResolutionStore
|
||||
from synapse.storage.databases.main.events import PartialStateConflictError
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import (
|
||||
@@ -275,7 +276,16 @@ class FederationEventHandler:
|
||||
affected=pdu.event_id,
|
||||
)
|
||||
|
||||
await self._process_received_pdu(origin, pdu, state_ids=None)
|
||||
try:
|
||||
await self._process_received_pdu(origin, pdu, state_ids=None)
|
||||
except PartialStateConflictError:
|
||||
# The room was un-partial stated while we were processing the PDU.
|
||||
# Try once more, with full state this time.
|
||||
logger.info(
|
||||
"Room %s was un-partial stated while processing the PDU, trying again.",
|
||||
room_id,
|
||||
)
|
||||
await self._process_received_pdu(origin, pdu, state_ids=None)
|
||||
|
||||
async def on_send_membership_event(
|
||||
self, origin: str, event: EventBase
|
||||
@@ -306,6 +316,9 @@ class FederationEventHandler:
|
||||
|
||||
Raises:
|
||||
SynapseError if the event is not accepted into the room
|
||||
PartialStateConflictError if the room was un-partial stated in between
|
||||
computing the state at the event and persisting it. The caller should
|
||||
retry exactly once in this case.
|
||||
"""
|
||||
logger.debug(
|
||||
"on_send_membership_event: Got event: %s, signatures: %s",
|
||||
@@ -423,6 +436,8 @@ class FederationEventHandler:
|
||||
|
||||
Raises:
|
||||
SynapseError if the response is in some way invalid.
|
||||
PartialStateConflictError if the homeserver is already in the room and it
|
||||
has been un-partial stated.
|
||||
"""
|
||||
create_event = None
|
||||
for e in state:
|
||||
@@ -1084,28 +1099,31 @@ class FederationEventHandler:
|
||||
|
||||
state_ids: Normally None, but if we are handling a gap in the graph
|
||||
(ie, we are missing one or more prev_events), the resolved state at the
|
||||
event
|
||||
event. Must not be partial state.
|
||||
|
||||
backfilled: True if this is part of a historical batch of events (inhibits
|
||||
notification to clients, and validation of device keys.)
|
||||
|
||||
PartialStateConflictError: if the room was un-partial stated in between
|
||||
computing the state at the event and persisting it. The caller should retry
|
||||
exactly once in this case. Will never be raised if `state_ids` is provided.
|
||||
"""
|
||||
logger.debug("Processing event: %s", event)
|
||||
assert not event.internal_metadata.outlier
|
||||
|
||||
context = await self._state_handler.compute_event_context(
|
||||
event,
|
||||
state_ids_before_event=state_ids,
|
||||
)
|
||||
try:
|
||||
context = await self._state_handler.compute_event_context(
|
||||
event,
|
||||
state_ids_before_event=state_ids,
|
||||
)
|
||||
context = await self._check_event_auth(
|
||||
origin,
|
||||
event,
|
||||
context,
|
||||
)
|
||||
except AuthError as e:
|
||||
# FIXME richvdh 2021/10/07 I don't think this is reachable. Let's log it
|
||||
# for now
|
||||
logger.exception("Unexpected AuthError from _check_event_auth")
|
||||
# This happens only if we couldn't find the auth events. We'll already have
|
||||
# logged a warning, so now we just convert to a FederationError.
|
||||
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
|
||||
|
||||
if not backfilled and not context.rejected:
|
||||
@@ -1934,6 +1952,9 @@ class FederationEventHandler:
|
||||
event: The event itself.
|
||||
context: The event context.
|
||||
backfilled: True if the event was backfilled.
|
||||
|
||||
PartialStateConflictError: if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
# this method should not be called on outliers (those code paths call
|
||||
# persist_events_and_notify directly.)
|
||||
@@ -1986,6 +2007,10 @@ class FederationEventHandler:
|
||||
|
||||
Returns:
|
||||
The stream ID after which all events have been persisted.
|
||||
|
||||
Raises:
|
||||
PartialStateConflictError: if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
if not event_and_contexts:
|
||||
return self._store.get_room_max_stream_ordering()
|
||||
@@ -1994,14 +2019,19 @@ class FederationEventHandler:
|
||||
if instance != self._instance_name:
|
||||
# Limit the number of events sent over replication. We choose 200
|
||||
# here as that is what we default to in `max_request_body_size(..)`
|
||||
for batch in batch_iter(event_and_contexts, 200):
|
||||
result = await self._send_events(
|
||||
instance_name=instance,
|
||||
store=self._store,
|
||||
room_id=room_id,
|
||||
event_and_contexts=batch,
|
||||
backfilled=backfilled,
|
||||
)
|
||||
try:
|
||||
for batch in batch_iter(event_and_contexts, 200):
|
||||
result = await self._send_events(
|
||||
instance_name=instance,
|
||||
store=self._store,
|
||||
room_id=room_id,
|
||||
event_and_contexts=batch,
|
||||
backfilled=backfilled,
|
||||
)
|
||||
except SynapseError as e:
|
||||
if e.code == HTTPStatus.CONFLICT:
|
||||
raise PartialStateConflictError()
|
||||
raise
|
||||
return result["max_stream_id"]
|
||||
else:
|
||||
assert self._storage_controllers.persistence
|
||||
|
||||
@@ -37,6 +37,7 @@ from synapse.api.errors import (
|
||||
AuthError,
|
||||
Codes,
|
||||
ConsentNotGivenError,
|
||||
LimitExceededError,
|
||||
NotFoundError,
|
||||
ShadowBanError,
|
||||
SynapseError,
|
||||
@@ -53,6 +54,7 @@ from synapse.handlers.directory import DirectoryHandler
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
||||
from synapse.storage.databases.main.events import PartialStateConflictError
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import (
|
||||
@@ -903,6 +905,9 @@ class EventCreationHandler:
|
||||
await self.clock.sleep(random.randint(1, 10))
|
||||
raise ShadowBanError()
|
||||
|
||||
if ratelimit:
|
||||
await self.request_ratelimiter.ratelimit(requester, update=False)
|
||||
|
||||
# We limit the number of concurrent event sends in a room so that we
|
||||
# don't fork the DAG too much. If we don't limit then we can end up in
|
||||
# a situation where event persistence can't keep up, causing
|
||||
@@ -1247,6 +1252,8 @@ class EventCreationHandler:
|
||||
|
||||
Raises:
|
||||
ShadowBanError if the requester has been shadow-banned.
|
||||
SynapseError(503) if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
extra_users = extra_users or []
|
||||
|
||||
@@ -1297,24 +1304,35 @@ class EventCreationHandler:
|
||||
|
||||
# We now persist the event (and update the cache in parallel, since we
|
||||
# don't want to block on it).
|
||||
result, _ = await make_deferred_yieldable(
|
||||
gather_results(
|
||||
(
|
||||
run_in_background(
|
||||
self._persist_event,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
try:
|
||||
result, _ = await make_deferred_yieldable(
|
||||
gather_results(
|
||||
(
|
||||
run_in_background(
|
||||
self._persist_event,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
),
|
||||
run_in_background(
|
||||
self.cache_joined_hosts_for_event, event, context
|
||||
).addErrback(
|
||||
log_failure, "cache_joined_hosts_for_event failed"
|
||||
),
|
||||
),
|
||||
run_in_background(
|
||||
self.cache_joined_hosts_for_event, event, context
|
||||
).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
|
||||
),
|
||||
consumeErrors=True,
|
||||
consumeErrors=True,
|
||||
)
|
||||
).addErrback(unwrapFirstError)
|
||||
except PartialStateConflictError as e:
|
||||
# The event context needs to be recomputed.
|
||||
# Turn the error into a 429, as a hint to the client to try again.
|
||||
logger.info(
|
||||
"Room %s was un-partial stated while persisting client event.",
|
||||
event.room_id,
|
||||
)
|
||||
).addErrback(unwrapFirstError)
|
||||
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
|
||||
|
||||
return result
|
||||
|
||||
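From a client's point of view, the conversion above means a send that races with un-partial stating now surfaces as an ordinary 429 rate-limit response whose retry_after_ms is 0, i.e. "retry immediately". A hedged sketch of how a client loop might honour that; the client object and its response shape are hypothetical, only the 429-with-zero-backoff behaviour comes from the hunk above.

import time

def send_with_retry(client, room_id: str, content: dict, max_attempts: int = 3) -> dict:
    # `client` and its response shape ({"status": int, "retry_after_ms": int, ...})
    # are hypothetical placeholders for whatever Matrix client library is in use.
    response: dict = {}
    for _ in range(max_attempts):
        response = client.send_message(room_id, content)
        if response.get("status") != 429:
            return response
        # retry_after_ms is 0 for this particular race, so this sleeps roughly 0s.
        time.sleep(response.get("retry_after_ms", 0) / 1000)
    return response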
@@ -1329,6 +1347,9 @@ class EventCreationHandler:
|
||||
"""Actually persists the event. Should only be called by
|
||||
`handle_new_client_event`, and see its docstring for documentation of
|
||||
the arguments.
|
||||
|
||||
PartialStateConflictError: if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
|
||||
# Skip push notification actions for historical messages
|
||||
@@ -1345,16 +1366,21 @@ class EventCreationHandler:
|
||||
# If we're a worker we need to hit out to the master.
|
||||
writer_instance = self._events_shard_config.get_instance(event.room_id)
|
||||
if writer_instance != self._instance_name:
|
||||
result = await self.send_event(
|
||||
instance_name=writer_instance,
|
||||
event_id=event.event_id,
|
||||
store=self.store,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
)
|
||||
try:
|
||||
result = await self.send_event(
|
||||
instance_name=writer_instance,
|
||||
event_id=event.event_id,
|
||||
store=self.store,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
)
|
||||
except SynapseError as e:
|
||||
if e.code == HTTPStatus.CONFLICT:
|
||||
raise PartialStateConflictError()
|
||||
raise
|
||||
stream_id = result["stream_id"]
|
||||
event_id = result["event_id"]
|
||||
if event_id != event.event_id:
|
||||
@@ -1482,6 +1508,10 @@ class EventCreationHandler:
|
||||
The persisted event. This may be different than the given event if
|
||||
it was de-duplicated (e.g. because we had already persisted an
|
||||
event with the same transaction ID.)
|
||||
|
||||
Raises:
|
||||
PartialStateConflictError: if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
extra_users = extra_users or []
|
||||
|
||||
|
||||
@@ -67,19 +67,14 @@ class ProfileHandler:
         target_user = UserID.from_string(user_id)

         if self.hs.is_mine(target_user):
-            try:
-                displayname = await self.store.get_profile_displayname(
-                    target_user.localpart
-                )
-                avatar_url = await self.store.get_profile_avatar_url(
-                    target_user.localpart
-                )
-            except StoreError as e:
-                if e.code == 404:
-                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
-                raise
+            profileinfo = await self.store.get_profileinfo(target_user.localpart)
+            if profileinfo.display_name is None:
+                raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)

-            return {"displayname": displayname, "avatar_url": avatar_url}
+            return {
+                "displayname": profileinfo.display_name,
+                "avatar_url": profileinfo.avatar_url,
+            }
         else:
             try:
                 result = await self.federation.make_query(

@@ -25,6 +25,7 @@ from synapse.api.constants import (
|
||||
GuestAccess,
|
||||
HistoryVisibility,
|
||||
JoinRules,
|
||||
PublicRoomsFilterFields,
|
||||
)
|
||||
from synapse.api.errors import (
|
||||
Codes,
|
||||
@@ -181,6 +182,7 @@ class RoomListHandler:
|
||||
== HistoryVisibility.WORLD_READABLE,
|
||||
"guest_can_join": room["guest_access"] == "can_join",
|
||||
"join_rule": room["join_rules"],
|
||||
"org.matrix.msc3827.room_type": room["room_type"],
|
||||
}
|
||||
|
||||
# Filter out Nones – rather omit the field altogether
|
||||
@@ -239,7 +241,9 @@ class RoomListHandler:
|
||||
response["chunk"] = results
|
||||
|
||||
response["total_room_count_estimate"] = await self.store.count_public_rooms(
|
||||
network_tuple, ignore_non_federatable=from_federation
|
||||
network_tuple,
|
||||
ignore_non_federatable=from_federation,
|
||||
search_filter=search_filter,
|
||||
)
|
||||
|
||||
return response
|
||||
@@ -508,8 +512,21 @@ class RoomListNextBatch:
|
||||
|
||||
|
||||
def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool:
|
||||
if search_filter and search_filter.get("generic_search_term", None):
|
||||
generic_search_term = search_filter["generic_search_term"].upper()
|
||||
"""Determines whether the given search filter matches a room entry returned over
|
||||
federation.
|
||||
|
||||
Only used if the remote server does not support MSC2197 remote-filtered search, and
|
||||
hence does not support MSC3827 filtering of `/publicRooms` by room type either.
|
||||
|
||||
In this case, we cannot apply the `room_type` filter since no `room_type` field is
|
||||
returned.
|
||||
"""
|
||||
if search_filter and search_filter.get(
|
||||
PublicRoomsFilterFields.GENERIC_SEARCH_TERM, None
|
||||
):
|
||||
generic_search_term = search_filter[
|
||||
PublicRoomsFilterFields.GENERIC_SEARCH_TERM
|
||||
].upper()
|
||||
if generic_search_term in room_entry.get("name", "").upper():
|
||||
return True
|
||||
elif generic_search_term in room_entry.get("topic", "").upper():
|
||||
|
||||
@@ -101,19 +101,33 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
|
||||
)
|
||||
|
||||
# Ratelimiter for invites, keyed by room (across all issuers, all
|
||||
# recipients).
|
||||
self._invites_per_room_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
|
||||
)
|
||||
self._invites_per_user_limiter = Ratelimiter(
|
||||
|
||||
# Ratelimiter for invites, keyed by recipient (across all rooms, all
|
||||
# issuers).
|
||||
self._invites_per_recipient_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
|
||||
)
|
||||
|
||||
# Ratelimiter for invites, keyed by issuer (across all rooms, all
|
||||
# recipients).
|
||||
self._invites_per_issuer_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_invites_per_issuer.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_invites_per_issuer.burst_count,
|
||||
)
|
||||
|
||||
self._third_party_invite_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
@@ -258,7 +272,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
if room_id:
|
||||
await self._invites_per_room_limiter.ratelimit(requester, room_id)
|
||||
|
||||
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
|
||||
await self._invites_per_recipient_limiter.ratelimit(requester, invitee_user_id)
|
||||
if requester is not None:
|
||||
await self._invites_per_issuer_limiter.ratelimit(requester)
|
||||
|
||||
async def _local_membership_update(
|
||||
self,
|
||||
@@ -830,10 +846,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
|
||||
content["membership"] = Membership.JOIN
|
||||
|
||||
profile = self.profile_handler
|
||||
if not content_specified:
|
||||
content["displayname"] = await profile.get_displayname(target)
|
||||
content["avatar_url"] = await profile.get_avatar_url(target)
|
||||
try:
|
||||
profile = self.profile_handler
|
||||
if not content_specified:
|
||||
content["displayname"] = await profile.get_displayname(target)
|
||||
content["avatar_url"] = await profile.get_avatar_url(target)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Failed to get profile information while processing remote join for %r: %s",
|
||||
target,
|
||||
e,
|
||||
)
|
||||
|
||||
if requester.is_guest:
|
||||
content["kind"] = "guest"
|
||||
@@ -910,11 +933,18 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
|
||||
content["membership"] = Membership.KNOCK
|
||||
|
||||
profile = self.profile_handler
|
||||
if "displayname" not in content:
|
||||
content["displayname"] = await profile.get_displayname(target)
|
||||
if "avatar_url" not in content:
|
||||
content["avatar_url"] = await profile.get_avatar_url(target)
|
||||
try:
|
||||
profile = self.profile_handler
|
||||
if "displayname" not in content:
|
||||
content["displayname"] = await profile.get_displayname(target)
|
||||
if "avatar_url" not in content:
|
||||
content["avatar_url"] = await profile.get_avatar_url(target)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Failed to get profile information while processing remote knock for %r: %s",
|
||||
target,
|
||||
e,
|
||||
)
|
||||
|
||||
return await self.remote_knock(
|
||||
remote_room_hosts, room_id, target, content
|
||||
|
||||
@@ -271,6 +271,9 @@ class StatsHandler:
|
||||
room_state["is_federatable"] = (
|
||||
event_content.get(EventContentFields.FEDERATE, True) is True
|
||||
)
|
||||
room_type = event_content.get(EventContentFields.ROOM_TYPE)
|
||||
if isinstance(room_type, str):
|
||||
room_state["room_type"] = room_type
|
||||
elif typ == EventTypes.JoinRules:
|
||||
room_state["join_rules"] = event_content.get("join_rule")
|
||||
elif typ == EventTypes.RoomHistoryVisibility:
|
||||
|
||||
@@ -164,6 +164,7 @@ Gotchas
|
||||
with an active span?
|
||||
"""
|
||||
import contextlib
|
||||
import enum
|
||||
import inspect
|
||||
import logging
|
||||
import re
|
||||
@@ -268,7 +269,7 @@ try:
|
||||
|
||||
_reporter: Reporter = attr.Factory(Reporter)
|
||||
|
||||
def set_process(self, *args, **kwargs):
|
||||
def set_process(self, *args: Any, **kwargs: Any) -> None:
|
||||
return self._reporter.set_process(*args, **kwargs)
|
||||
|
||||
def report_span(self, span: "opentracing.Span") -> None:
|
||||
@@ -319,7 +320,11 @@ _homeserver_whitelist: Optional[Pattern[str]] = None

 # Util methods

-Sentinel = object()
+
+class _Sentinel(enum.Enum):
+    # defining a sentinel in this way allows mypy to correctly handle the
+    # type of a dictionary lookup.
+    sentinel = object()


 P = ParamSpec("P")

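A small self-contained illustration (not taken from the diff) of why an Enum-based sentinel is friendlier to mypy than the old module-level object(): the sentinel now has a nameable type, so isinstance() checks narrow the parameter type cleanly.

import enum
from typing import Union

class _Sentinel(enum.Enum):
    sentinel = object()

def greet(name: Union[str, _Sentinel] = _Sentinel.sentinel) -> str:
    if isinstance(name, _Sentinel):
        return "no name supplied"
    # mypy knows `name` is a str on this branch.
    return f"hello {name.upper()}"

print(greet())         # no name supplied
print(greet("world"))  # hello WORLD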
@@ -339,12 +344,12 @@ def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]:
|
||||
return _only_if_tracing_inner
|
||||
|
||||
|
||||
def ensure_active_span(message, ret=None):
|
||||
def ensure_active_span(message: str, ret=None):
|
||||
"""Executes the operation only if opentracing is enabled and there is an active span.
|
||||
If there is no active span it logs message at the error level.
|
||||
|
||||
Args:
|
||||
message (str): Message which fills in "There was no active span when trying to %s"
|
||||
message: Message which fills in "There was no active span when trying to %s"
|
||||
in the error log if there is no active span and opentracing is enabled.
|
||||
ret (object): return value if opentracing is None or there is no active span.
|
||||
|
||||
@@ -402,7 +407,7 @@ def init_tracer(hs: "HomeServer") -> None:
|
||||
config = JaegerConfig(
|
||||
config=hs.config.tracing.jaeger_config,
|
||||
service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
|
||||
scope_manager=LogContextScopeManager(hs.config),
|
||||
scope_manager=LogContextScopeManager(),
|
||||
metrics_factory=PrometheusMetricsFactory(),
|
||||
)
|
||||
|
||||
@@ -451,15 +456,15 @@ def whitelisted_homeserver(destination: str) -> bool:
|
||||
|
||||
# Could use kwargs but I want these to be explicit
|
||||
def start_active_span(
|
||||
operation_name,
|
||||
child_of=None,
|
||||
references=None,
|
||||
tags=None,
|
||||
start_time=None,
|
||||
ignore_active_span=False,
|
||||
finish_on_close=True,
|
||||
operation_name: str,
|
||||
child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
|
||||
references: Optional[List["opentracing.Reference"]] = None,
|
||||
tags: Optional[Dict[str, str]] = None,
|
||||
start_time: Optional[float] = None,
|
||||
ignore_active_span: bool = False,
|
||||
finish_on_close: bool = True,
|
||||
*,
|
||||
tracer=None,
|
||||
tracer: Optional["opentracing.Tracer"] = None,
|
||||
):
|
||||
"""Starts an active opentracing span.
|
||||
|
||||
@@ -493,11 +498,11 @@ def start_active_span(
|
||||
def start_active_span_follows_from(
|
||||
operation_name: str,
|
||||
contexts: Collection,
|
||||
child_of=None,
|
||||
child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
|
||||
start_time: Optional[float] = None,
|
||||
*,
|
||||
inherit_force_tracing=False,
|
||||
tracer=None,
|
||||
inherit_force_tracing: bool = False,
|
||||
tracer: Optional["opentracing.Tracer"] = None,
|
||||
):
|
||||
"""Starts an active opentracing span, with additional references to previous spans
|
||||
|
||||
@@ -540,7 +545,7 @@ def start_active_span_from_edu(
|
||||
edu_content: Dict[str, Any],
|
||||
operation_name: str,
|
||||
references: Optional[List["opentracing.Reference"]] = None,
|
||||
tags: Optional[Dict] = None,
|
||||
tags: Optional[Dict[str, str]] = None,
|
||||
start_time: Optional[float] = None,
|
||||
ignore_active_span: bool = False,
|
||||
finish_on_close: bool = True,
|
||||
@@ -617,23 +622,27 @@ def set_operation_name(operation_name: str) -> None:
|
||||
|
||||
|
||||
@only_if_tracing
|
||||
def force_tracing(span=Sentinel) -> None:
|
||||
def force_tracing(
|
||||
span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel
|
||||
) -> None:
|
||||
"""Force sampling for the active/given span and its children.
|
||||
|
||||
Args:
|
||||
span: span to force tracing for. By default, the active span.
|
||||
"""
|
||||
if span is Sentinel:
|
||||
span = opentracing.tracer.active_span
|
||||
if span is None:
|
||||
if isinstance(span, _Sentinel):
|
||||
span_to_trace = opentracing.tracer.active_span
|
||||
else:
|
||||
span_to_trace = span
|
||||
if span_to_trace is None:
|
||||
logger.error("No active span in force_tracing")
|
||||
return
|
||||
|
||||
span.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
|
||||
span_to_trace.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
|
||||
|
||||
# also set a bit of baggage, so that we have a way of figuring out if
|
||||
# it is enabled later
|
||||
span.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1")
|
||||
span_to_trace.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1")
|
||||
|
||||
|
||||
def is_context_forced_tracing(
|
||||
@@ -789,7 +798,7 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte
|
||||
# Tracing decorators
|
||||
|
||||
|
||||
def trace(func=None, opname=None):
|
||||
def trace(func=None, opname: Optional[str] = None):
|
||||
"""
|
||||
Decorator to trace a function.
|
||||
Sets the operation name to that of the function's or that given
|
||||
@@ -822,11 +831,11 @@ def trace(func=None, opname=None):
|
||||
result = func(*args, **kwargs)
|
||||
if isinstance(result, defer.Deferred):
|
||||
|
||||
def call_back(result):
|
||||
def call_back(result: R) -> R:
|
||||
scope.__exit__(None, None, None)
|
||||
return result
|
||||
|
||||
def err_back(result):
|
||||
def err_back(result: R) -> R:
|
||||
scope.__exit__(None, None, None)
|
||||
return result
|
||||
|
||||
|
||||
@@ -16,11 +16,15 @@ import logging
|
||||
from types import TracebackType
|
||||
from typing import Optional, Type
|
||||
|
||||
from opentracing import Scope, ScopeManager
|
||||
from opentracing import Scope, ScopeManager, Span
|
||||
|
||||
import twisted
|
||||
|
||||
from synapse.logging.context import current_context, nested_logging_context
|
||||
from synapse.logging.context import (
|
||||
LoggingContext,
|
||||
current_context,
|
||||
nested_logging_context,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -35,11 +39,11 @@ class LogContextScopeManager(ScopeManager):
|
||||
but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self) -> None:
|
||||
pass
|
||||
|
||||
@property
|
||||
def active(self):
|
||||
def active(self) -> Optional[Scope]:
|
||||
"""
|
||||
Returns the currently active Scope which can be used to access the
|
||||
currently active Scope.span.
|
||||
@@ -48,19 +52,18 @@ class LogContextScopeManager(ScopeManager):
|
||||
Tracer.start_active_span() time.
|
||||
|
||||
Return:
|
||||
(Scope) : the Scope that is active, or None if not
|
||||
available.
|
||||
The Scope that is active, or None if not available.
|
||||
"""
|
||||
ctx = current_context()
|
||||
return ctx.scope
|
||||
|
||||
def activate(self, span, finish_on_close):
|
||||
def activate(self, span: Span, finish_on_close: bool) -> Scope:
|
||||
"""
|
||||
Makes a Span active.
|
||||
Args
|
||||
span (Span): the span that should become active.
|
||||
finish_on_close (Boolean): whether Span should be automatically
|
||||
finished when Scope.close() is called.
|
||||
span: the span that should become active.
|
||||
finish_on_close: whether Span should be automatically finished when
|
||||
Scope.close() is called.
|
||||
|
||||
Returns:
|
||||
Scope to control the end of the active period for
|
||||
@@ -112,8 +115,8 @@ class _LogContextScope(Scope):
|
||||
def __init__(
|
||||
self,
|
||||
manager: LogContextScopeManager,
|
||||
span,
|
||||
logcontext,
|
||||
span: Span,
|
||||
logcontext: LoggingContext,
|
||||
enter_logcontext: bool,
|
||||
finish_on_close: bool,
|
||||
):
|
||||
@@ -121,13 +124,13 @@ class _LogContextScope(Scope):
|
||||
Args:
|
||||
manager:
|
||||
the manager that is responsible for this scope.
|
||||
span (Span):
|
||||
span:
|
||||
the opentracing span which this scope represents the local
|
||||
lifetime for.
|
||||
logcontext (LogContext):
|
||||
the logcontext to which this scope is attached.
|
||||
logcontext:
|
||||
the log context to which this scope is attached.
|
||||
enter_logcontext:
|
||||
if True the logcontext will be exited when the scope is finished
|
||||
if True the log context will be exited when the scope is finished
|
||||
finish_on_close:
|
||||
if True finish the span when the scope is closed
|
||||
"""
|
||||
|
||||
@@ -25,6 +25,7 @@ from synapse.replication.http import (
|
||||
push,
|
||||
register,
|
||||
send_event,
|
||||
state,
|
||||
streams,
|
||||
)
|
||||
|
||||
@@ -48,6 +49,7 @@ class ReplicationRestResource(JsonResource):
|
||||
streams.register_servlets(hs, self)
|
||||
account_data.register_servlets(hs, self)
|
||||
push.register_servlets(hs, self)
|
||||
state.register_servlets(hs, self)
|
||||
|
||||
# The following can't currently be instantiated on workers.
|
||||
if hs.config.worker.worker_app is None:
|
||||
|
||||
@@ -60,6 +60,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
|
||||
{
|
||||
"max_stream_id": 32443,
|
||||
}
|
||||
|
||||
Responds with a 409 when a `PartialStateConflictError` is raised due to an event
|
||||
context that needs to be recomputed due to the un-partial stating of a room.
|
||||
"""
|
||||
|
||||
NAME = "fed_send_events"
|
||||
|
||||
@@ -59,6 +59,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
|
||||
|
||||
{ "stream_id": 12345, "event_id": "$abcdef..." }
|
||||
|
||||
Responds with a 409 when a `PartialStateConflictError` is raised due to an event
|
||||
context that needs to be recomputed due to the un-partial stating of a room.
|
||||
|
||||
The returned event ID may not match the sent event if it was deduplicated.
|
||||
"""
|
||||
|
||||
|
||||
synapse/replication/http/state.py (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
# Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Tuple
|
||||
|
||||
from twisted.web.server import Request
|
||||
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.http.server import HttpServer
|
||||
from synapse.replication.http._base import ReplicationEndpoint
|
||||
from synapse.types import JsonDict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReplicationUpdateCurrentStateRestServlet(ReplicationEndpoint):
|
||||
"""Recalculates the current state for a room, and persists it.
|
||||
|
||||
The API looks like:
|
||||
|
||||
POST /_synapse/replication/update_current_state/:room_id
|
||||
|
||||
{}
|
||||
|
||||
200 OK
|
||||
|
||||
{}
|
||||
"""
|
||||
|
||||
NAME = "update_current_state"
|
||||
PATH_ARGS = ("room_id",)
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
self._state_handler = hs.get_state_handler()
|
||||
self._events_shard_config = hs.config.worker.events_shard_config
|
||||
self._instance_name = hs.get_instance_name()
|
||||
|
||||
@staticmethod
|
||||
async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override]
|
||||
return {}
|
||||
|
||||
async def _handle_request( # type: ignore[override]
|
||||
self, request: Request, room_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
writer_instance = self._events_shard_config.get_instance(room_id)
|
||||
if writer_instance != self._instance_name:
|
||||
raise SynapseError(
|
||||
400, "/update_current_state request was routed to the wrong worker"
|
||||
)
|
||||
|
||||
await self._state_handler.update_current_state(room_id)
|
||||
|
||||
return 200, {}
|
||||
|
||||
|
||||
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
if hs.get_instance_name() in hs.config.worker.writers.events:
|
||||
ReplicationUpdateCurrentStateRestServlet(hs).register(http_server)
|
||||
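Caller-side sketch of the new endpoint (the real wiring appears in the StateHandler hunk near the end of this diff): a worker that is not the events writer for a room proxies the recalculation to the writer through this servlet, while the writer itself updates current state locally. `hs` is assumed to be an initialised HomeServer.

update_current_state_client = ReplicationUpdateCurrentStateRestServlet.make_client(hs)

async def update_current_state(room_id: str) -> None:
    writer_instance = hs.config.worker.events_shard_config.get_instance(room_id)
    if writer_instance != hs.get_instance_name():
        # Becomes POST /_synapse/replication/update_current_state/<room_id> on the writer.
        await update_current_state_client(instance_name=writer_instance, room_id=room_id)
        return

    persistence = hs.get_storage_controllers().persistence
    assert persistence is not None, "should only run on the events writer"
    await persistence.update_current_state(room_id)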
@@ -15,11 +15,11 @@
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Tuple
|
||||
|
||||
from synapse.api.errors import AuthError, NotFoundError, SynapseError
|
||||
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
|
||||
from synapse.http.server import HttpServer
|
||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.types import JsonDict
|
||||
from synapse.types import JsonDict, RoomID
|
||||
|
||||
from ._base import client_patterns
|
||||
|
||||
@@ -104,6 +104,13 @@ class RoomAccountDataServlet(RestServlet):
|
||||
if user_id != requester.user.to_string():
|
||||
raise AuthError(403, "Cannot add account data for other users.")
|
||||
|
||||
if not RoomID.is_valid(room_id):
|
||||
raise SynapseError(
|
||||
400,
|
||||
f"{room_id} is not a valid room ID",
|
||||
Codes.INVALID_PARAM,
|
||||
)
|
||||
|
||||
body = parse_json_object_from_request(request)
|
||||
|
||||
if account_data_type == "m.fully_read":
|
||||
@@ -111,6 +118,7 @@ class RoomAccountDataServlet(RestServlet):
|
||||
405,
|
||||
"Cannot set m.fully_read through this API."
|
||||
" Use /rooms/!roomId:server.name/read_markers",
|
||||
Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
await self.handler.add_account_data_to_room(
|
||||
@@ -130,6 +138,13 @@ class RoomAccountDataServlet(RestServlet):
|
||||
if user_id != requester.user.to_string():
|
||||
raise AuthError(403, "Cannot get account data for other users.")
|
||||
|
||||
if not RoomID.is_valid(room_id):
|
||||
raise SynapseError(
|
||||
400,
|
||||
f"{room_id} is not a valid room ID",
|
||||
Codes.INVALID_PARAM,
|
||||
)
|
||||
|
||||
event = await self.store.get_account_data_for_room_and_type(
|
||||
user_id, room_id, account_data_type
|
||||
)
|
||||
|
||||
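The new guard simply rejects malformed room IDs before any storage lookup. A short illustration of the check; the room ID literals are examples, and the 400 / M_INVALID_PARAM outcome is the one added in the hunk above:

from synapse.types import RoomID

for candidate in ("!somewhere:example.org", "not-a-room-id"):
    if RoomID.is_valid(candidate):
        print(f"{candidate!r} is accepted")
    else:
        print(f"{candidate!r} would now get a 400 with errcode M_INVALID_PARAM")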
@@ -95,6 +95,8 @@ class VersionsRestServlet(RestServlet):
|
||||
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
|
||||
# Supports receiving private read receipts as per MSC2285
|
||||
"org.matrix.msc2285": self.config.experimental.msc2285_enabled,
|
||||
# Supports filtering of /publicRooms by room type MSC3827
|
||||
"org.matrix.msc3827": self.config.experimental.msc3827_enabled,
|
||||
# Adds support for importing historical messages as per MSC2716
|
||||
"org.matrix.msc2716": self.config.experimental.msc2716_enabled,
|
||||
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
|
||||
|
||||
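Client-side sketch (not part of the diff): with msc3827_enabled turned on, the flag above is advertised under unstable_features in /_matrix/client/versions, so a client can feature-detect it before relying on room-type filtering of /publicRooms. The homeserver URL below is a placeholder.

import json
from urllib.request import urlopen

def supports_msc3827(homeserver_url: str) -> bool:
    with urlopen(f"{homeserver_url}/_matrix/client/versions") as resp:
        body = json.load(resp)
    return bool(body.get("unstable_features", {}).get("org.matrix.msc3827", False))

# e.g. supports_msc3827("https://matrix.example.org")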
@@ -43,6 +43,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersio
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.snapshot import EventContext
|
||||
from synapse.logging.context import ContextResourceUsage
|
||||
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
|
||||
from synapse.state import v1, v2
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.roommember import ProfileInfo
|
||||
@@ -129,6 +130,12 @@ class StateHandler:
|
||||
self.hs = hs
|
||||
self._state_resolution_handler = hs.get_state_resolution_handler()
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._events_shard_config = hs.config.worker.events_shard_config
|
||||
self._instance_name = hs.get_instance_name()
|
||||
|
||||
self._update_current_state_client = (
|
||||
ReplicationUpdateCurrentStateRestServlet.make_client(hs)
|
||||
)
|
||||
|
||||
async def get_current_state_ids(
|
||||
self,
|
||||
@@ -249,8 +256,12 @@ class StateHandler:
|
||||
partial_state = True
|
||||
|
||||
logger.debug("calling resolve_state_groups from compute_event_context")
|
||||
# we've already taken into account partial state, so no need to wait for
|
||||
# complete state here.
|
||||
entry = await self.resolve_state_groups_for_events(
|
||||
event.room_id, event.prev_event_ids()
|
||||
event.room_id,
|
||||
event.prev_event_ids(),
|
||||
await_full_state=False,
|
||||
)
|
||||
|
||||
state_ids_before_event = entry.state
|
||||
@@ -335,7 +346,7 @@ class StateHandler:
|
||||
|
||||
@measure_func()
|
||||
async def resolve_state_groups_for_events(
|
||||
self, room_id: str, event_ids: Collection[str]
|
||||
self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
|
||||
) -> _StateCacheEntry:
|
||||
"""Given a list of event_ids this method fetches the state at each
|
||||
event, resolves conflicts between them and returns them.
|
||||
@@ -343,6 +354,8 @@ class StateHandler:
|
||||
Args:
|
||||
room_id
|
||||
event_ids
|
||||
await_full_state: if true, will block if we do not yet have complete
|
||||
state at these events.
|
||||
|
||||
Returns:
|
||||
The resolved state
|
||||
@@ -350,7 +363,7 @@ class StateHandler:
|
||||
logger.debug("resolve_state_groups event_ids %s", event_ids)
|
||||
|
||||
state_groups = await self._state_storage_controller.get_state_group_for_events(
|
||||
event_ids
|
||||
event_ids, await_full_state=await_full_state
|
||||
)
|
||||
|
||||
state_group_ids = state_groups.values()
|
||||
@@ -417,6 +430,24 @@ class StateHandler:
|
||||
|
||||
return {key: state_map[ev_id] for key, ev_id in new_state.items()}
|
||||
|
||||
async def update_current_state(self, room_id: str) -> None:
|
||||
"""Recalculates the current state for a room, and persists it.
|
||||
|
||||
Raises:
|
||||
SynapseError(502): if all attempts to connect to the event persister worker
|
||||
fail
|
||||
"""
|
||||
writer_instance = self._events_shard_config.get_instance(room_id)
|
||||
if writer_instance != self._instance_name:
|
||||
await self._update_current_state_client(
|
||||
instance_name=writer_instance,
|
||||
room_id=room_id,
|
||||
)
|
||||
return
|
||||
|
||||
assert self._storage_controllers.persistence is not None
|
||||
await self._storage_controllers.persistence.update_current_state(room_id)
|
||||
|
||||
|
||||
@attr.s(slots=True, auto_attribs=True)
|
||||
class _StateResMetrics:
Some files were not shown because too many files have changed in this diff.