Compare commits
262 Commits
| SHA1 |
|---|
| 835da407c8 | |||
| 46a64f473d | |||
| cec8ef5890 | |||
| 0f48724e00 | |||
| 7780a71abf | |||
| 55c6252edd | |||
| 6e2140364b | |||
| 6e5663b291 | |||
| b81cab0794 | |||
| d8dc12a95c | |||
| f8f06fc773 | |||
| 05e8a5d298 | |||
| 3e2e76ca15 | |||
| dfc7646504 | |||
| 9f512ff537 | |||
| 88fe72cc1e | |||
| 9b1f360091 | |||
| 643c0c50c1 | |||
| e31d06f6f0 | |||
| 3810730ba5 | |||
| 641f43ba81 | |||
| 1783156dbc | |||
| 4e13743738 | |||
| 3ad74b63e5 | |||
| 5f8173dd80 | |||
| ab3165efb7 | |||
| 4586119f0b | |||
| 214f3b7d21 | |||
| 772bad2562 | |||
| 3cdf5a1386 | |||
| 961ee75a9b | |||
| 5f72ea1bde | |||
| 85ca963c1a | |||
| 98ec375b26 | |||
| e630722f11 | |||
| 0cd182f296 | |||
| dd5cc37aa4 | |||
| 95a038c106 | |||
| 2e2d8cc2f9 | |||
| 7851a2c62f | |||
| 78e4d96a4d | |||
| 7732c4902c | |||
| 36af768c13 | |||
| 1a90c1e3af | |||
| d1cd96ce29 | |||
| 3a7e97c7ad | |||
| 0bcb651b3f | |||
| 350062661c | |||
| f931c0602a | |||
| 6902e9ff2b | |||
| 05a37f4008 | |||
| 2cf74cf2fc | |||
| 6fe757d69e | |||
| ae01a7edd3 | |||
| 793d03e2c5 | |||
| 573cd0f92f | |||
| 7ec9b06303 | |||
| fd1e7d0fc2 | |||
| 163fd686b5 | |||
| 79e7c2c426 | |||
| 31c1209c50 | |||
| 9c4c49991d | |||
| 800ba87cc8 | |||
| ab3fdcf960 | |||
| 41b5f72677 | |||
| 66053b6bfb | |||
| d666fc02fa | |||
| ac80bfba42 | |||
| 42d8710f38 | |||
| 708d88b1a2 | |||
| 7a95e80418 | |||
| efdbcfd6af | |||
| ca7e34cb57 | |||
| a7293ef16f | |||
| 5218fe7670 | |||
| f608e6c8cf | |||
| 9633eb2162 | |||
| b446c99ac9 | |||
| 5c9e39e619 | |||
| 80839a44f1 | |||
| f0b03186d9 | |||
| 33ebee47e4 | |||
| c4cf916ed7 | |||
| 336bff1104 | |||
| 993d90f82b | |||
| 21351820e0 | |||
| b7762b0c9f | |||
| f871222880 | |||
| 319a805cd3 | |||
| 9b43df1f7b | |||
| e4409301ba | |||
| 4e900ece42 | |||
| bebf994ee8 | |||
| 6927d87254 | |||
| 11df4ec6c2 | |||
| 5e88143dff | |||
| 34a8370d7b | |||
| adbf975623 | |||
| 15cdcf8f30 | |||
| 5a32ec59b2 | |||
| 9a3f1f5383 | |||
| f96b85eca8 | |||
| c31c1091d4 | |||
| d8d0271977 | |||
| 2fc15ac718 | |||
| a7fb66e800 | |||
| 19a1d6a42a | |||
| c8cbd66d3b | |||
| 9b67715bc3 | |||
| 437a8ed9ef | |||
| e0bb268134 | |||
| 1f32b90b0f | |||
| 4d693f9b79 | |||
| 013f3f5e44 | |||
| 8a519f8abc | |||
| a2b00a4486 | |||
| 8a5d691140 | |||
| 512007f829 | |||
| e9220adffc | |||
| 28a64807b2 | |||
| d653f6fbec | |||
| c20d0ca6c2 | |||
| b690fe749b | |||
| 6f2943714b | |||
| 287a9c1e20 | |||
| ac95167d2f | |||
| 4ba55a620f | |||
| 8cd760fca8 | |||
| 89f11f8c6f | |||
| a4643a685c | |||
| 3c41d87b67 | |||
| 7ca8ee67a5 | |||
| 38adf14998 | |||
| 14662d3c18 | |||
| fffb3c4c8f | |||
| 61aae18d45 | |||
| 5859e2fe0c | |||
| 8b7b371ff6 | |||
| b0659a112d | |||
| 1800bd47a8 | |||
| 9925f9b8b0 | |||
| 1642abd77e | |||
| 84eb14c4d2 | |||
| 0004260952 | |||
| a503c2c388 | |||
| e689cae47d | |||
| 088f3ae182 | |||
| 8810c93e82 | |||
| 4df10d3214 | |||
| 5436b014f4 | |||
| e78d4f61fc | |||
| f4c5e5864c | |||
| c5776780f0 | |||
| 692b82838e | |||
| 516d092ff9 | |||
| 831d4797ab | |||
| 6b26536a52 | |||
| a701a09f9b | |||
| 34baf76451 | |||
| 01211e0c16 | |||
| d9bc65918e | |||
| 9d21ecf7ce | |||
| 0a59f977a2 | |||
| 6134b3079e | |||
| 1530cef192 | |||
| afa17f0eab | |||
| bf9d549e3a | |||
| 8fe930c215 | |||
| 80e0e1f35e | |||
| 2177e356bc | |||
| c46065fa3d | |||
| 872dbb0181 | |||
| 12d1f82db2 | |||
| 9e06e22064 | |||
| 3f7cfbc9e5 | |||
| f70afbd565 | |||
| 96274565ff | |||
| 6121056740 | |||
| fc9bd620ce | |||
| c486fa5fd9 | |||
| 86965605a4 | |||
| 1da0f79d54 | |||
| 4587b35929 | |||
| dda9b7fc4d | |||
| dea577998f | |||
| 5dd949bee6 | |||
| 9e90d643e6 | |||
| d1130a249b | |||
| 2fcf4b3f6c | |||
| 605d161d7d | |||
| 8e5706d144 | |||
| 90b2327066 | |||
| 54f674f7a9 | |||
| ef3619e61d | |||
| e6a106fd5e | |||
| 4a53f35737 | |||
| 735e89bd3a | |||
| 003cc6910a | |||
| 32c828d0f7 | |||
| e10a2fe0c2 | |||
| bc9dff1d95 | |||
| 3b12f6d61b | |||
| 483f2aa2ec | |||
| 7577894bec | |||
| ed9aea42fa | |||
| 72e7f1c420 | |||
| ea27528b5d | |||
| 52a947dc46 | |||
| 88cd6f9378 | |||
| 3e4af36bc8 | |||
| a4c1fdb44a | |||
| 15382b1afa | |||
| 690cb4f3b3 | |||
| 032688854b | |||
| 180d8ff0d4 | |||
| dc8d825ef2 | |||
| 9a0172d49f | |||
| 5627182788 | |||
| 0dc9c5653c | |||
| bfa7d6b035 | |||
| b1989ced00 | |||
| 65e02b3e6d | |||
| 2ce27a24fe | |||
| ca9234a9eb | |||
| d8bab6793c | |||
| 094802e04e | |||
| ea992adf86 | |||
| 2eef234ae3 | |||
| 26211fec24 | |||
| f63bedef07 | |||
| 0211f18d65 | |||
| 00a67f831a | |||
| d2ef1a79cf | |||
| 0752ab7a36 | |||
| 75574726a7 | |||
| 158e0937eb | |||
| cd1ae3d0b4 | |||
| 36071d39f7 | |||
| 4aeb00ca20 | |||
| 423cca9efe | |||
| 87c230c27c | |||
| d56202b038 | |||
| 8533c8b03d | |||
| fb0ffa9676 | |||
| 9297d040a7 | |||
| 7e91107be1 | |||
| 1d11b452b7 | |||
| cea1b58c4a | |||
| a511a890d7 | |||
| 61fd2a8f59 | |||
| 31b125ccec | |||
| ae8a616b49 | |||
| 11282ade1d | |||
| 1fbe0316a9 | |||
| 106959b3cf | |||
| 2ffaf30803 | |||
| b4461e7d8a | |||
| 594a07ede4 | |||
| 6d282a9c89 | |||
| 1103c5fe8a | |||
| f3f0ab10fe | |||
| 6adb89ff00 |
.ci/scripts/test_export_data_command.sh:

```diff
@@ -2,29 +2,24 @@
 # Test for the export-data admin command against sqlite and postgres
+#
+# Expects Synapse to have been already installed with `poetry install --extras postgres`.
+# Expects `poetry` to be available on the `PATH`.

 set -xe
 cd "$(dirname "$0")/../.."

-echo "--- Install dependencies"
-
-# Install dependencies for this test.
-pip install psycopg2
-
-# Install Synapse itself. This won't update any libraries.
-pip install -e .
-
 echo "--- Generate the signing key"

 # Generate the server's signing key.
-python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
+poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

 echo "--- Prepare test database"

 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

 # Run the export-data command on the sqlite test database
-python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
     --output-directory /tmp/export_data

 # Test that the output directory exists and contains the rooms directory
@@ -37,14 +32,14 @@ else
 fi

 # Create the PostgreSQL database.
-.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

 # Port the SQLite database to postgres so we can check the command works against postgres
 echo "+++ Port SQLite3 database to postgres"
-scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

 # Run the export-data command on the postgres database
-python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
     --output-directory /tmp/export_data2

 # Test that the output directory exists and contains the rooms directory
```
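The hunk above elides the actual directory check (only the `else`/`fi` context survives the cut). As an illustrative sketch only — not part of the compare, and the real script's wording may differ — such a check would look like:

```sh
# Illustrative sketch of the elided check; messages and paths are assumptions.
if [ -d "/tmp/export_data/rooms" ]; then
    echo "Command successful, test passes"
else
    echo "No rooms directory found. Failing the test."
    exit 1
fi
```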
.ci/scripts/test_old_deps.sh:

```diff
@@ -1,6 +1,9 @@
 #!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `focal` container; it installs the
-# minimal requirements for tox and hands over to the py3-old tox environment.
+# this script is run by GitHub Actions in a plain `focal` container; it
+#  - installs the minimal system requirements, and poetry;
+#  - patches the project definition file to refer to old versions only;
+#  - creates a venv with these old versions using poetry; and finally
+#  - invokes `trial` to run the tests with old deps.

 # Prevent tzdata from asking for user input
 export DEBIAN_FRONTEND=noninteractive
@@ -9,12 +12,70 @@ set -ex

 apt-get update
 apt-get install -y \
-        python3 python3-dev python3-pip python3-venv \
-        libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev
+        python3 python3-dev python3-pip python3-venv pipx \
+        libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

 export LANG="C.UTF-8"

 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1

-exec tox -e py3-old
+# TODO: in the future, we could use an implementation of
+# https://github.com/python-poetry/poetry/issues/3527
+# https://github.com/pypa/pip/issues/8085
+# to select the lowest possible versions, rather than resorting to this sed script.
+
+# Patch the project definitions in-place:
+# - Replace all lower and tilde bounds with exact bounds
+# - Make the pyopenssl 17.0, which is the oldest version that works with
+#   a `cryptography` compiled against OpenSSL 1.1.
+# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
+# - Omit systemd: we're not logging to journal here.
+
+# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
+# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
+# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
+# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
+# runs on 3.8 (#12343).
+
+sed -i \
+   -e "s/[~>]=/==/g" \
+   -e "/psycopg2/d" \
+   -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
+   -e '/systemd/d' \
+   pyproject.toml
+
+# Use poetry to do the installation. This ensures that the versions are all mutually
+# compatible (as far as the package metadata declares, anyway); pip's package resolver
+# is more lax.
+#
+# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
+# toml file. This means we don't have to ensure compatibility between old deps and
+# dev tools.
+
+pip install --user toml
+
+REMOVE_DEV_DEPENDENCIES="
+import toml
+with open('pyproject.toml', 'r') as f:
+    data = toml.loads(f.read())
+
+del data['tool']['poetry']['dev-dependencies']
+
+with open('pyproject.toml', 'w') as f:
+    toml.dump(data, f)
+"
+python3 -c "$REMOVE_DEV_DEPENDENCIES"
+
+pipx install poetry==1.1.12
+~/.local/bin/poetry lock
+
+echo "::group::Patched pyproject.toml"
+cat pyproject.toml
+echo "::endgroup::"
+echo "::group::Lockfile after patch"
+cat poetry.lock
+echo "::endgroup::"
+
+~/.local/bin/poetry install -E "all test"
+~/.local/bin/poetry run trial --jobs=2 tests
```
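The TODO above leaves caret bounds alone because a blanket `s/\^/==/g` would also pin the `python` requirement. A sketch of how that could be guarded — the exclusion address is an assumption on my part, not something the script does:

```sh
# Replace caret bounds with exact pins on every line except the `python = "^3.7"` one.
sed -i -e '/^python = /!s/\^/==/g' pyproject.toml
```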
.ci/scripts/test_synapse_port_db.sh:

```diff
@@ -1,41 +1,37 @@
 #!/usr/bin/env bash
 #
 # Test script for 'synapse_port_db'.
-#  - sets up synapse and deps
+#  - configures synapse and a postgres server.
 #  - runs the port script on a prepopulated test sqlite db
 #  - also runs it against a new sqlite db
+#
+# Expects Synapse to have been already installed with `poetry install --extras postgres`.
+# Expects `poetry` to be available on the `PATH`.

 set -xe
 cd "$(dirname "$0")/../.."

-echo "--- Install dependencies"
-
-# Install dependencies for this test.
-pip install psycopg2 coverage coverage-enable-subprocess
-
-# Install Synapse itself. This won't update any libraries.
-pip install -e .
-
 echo "--- Generate the signing key"

 # Generate the server's signing key.
-python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
+poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

 echo "--- Prepare test database"

 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

 # Create the PostgreSQL database.
-.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

 echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
+# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
+poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

 #####

@@ -46,12 +42,12 @@ echo "--- Prepare empty SQLite database"
 # we do this by deleting the sqlite db, and then doing the same again.
 rm .ci/test_db.db

-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

 # re-create the PostgreSQL database.
-.ci/scripts/postgres_exec.py \
+poetry run .ci/scripts/postgres_exec.py \
     "DROP DATABASE synapse" \
     "CREATE DATABASE synapse"

 echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
```
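`postgres_exec.py` itself is not shown in this compare. For orientation, the statements it is handed are equivalent to plain `psql` calls along these lines — a sketch, where the host and superuser are assumptions about the CI environment, not taken from the diff:

```sh
# Roughly what .ci/scripts/postgres_exec.py "..." does, expressed with psql.
psql -h localhost -U postgres -c "DROP DATABASE synapse"
psql -h localhost -U postgres -c "CREATE DATABASE synapse"
```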
.dockerignore:

+5 -3

```diff
@@ -3,11 +3,13 @@
 # things to include
 !docker
 !scripts
 !synapse
-!MANIFEST.in
 !README.rst
+!pyproject.toml
+!poetry.lock
+
+# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
+!MANIFEST.in
 !setup.py
 !synctl

 **/__pycache__
```
.github/workflows/docs.yml:

```diff
@@ -22,7 +22,7 @@ jobs:
       - name: Setup mdbook
         uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
         with:
-          mdbook-version: '0.4.9'
+          mdbook-version: '0.4.17'

       - name: Build the documentation
         # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
```
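The comment above describes mdbook's behaviour: without docs/README.md linked from SUMMARY.md, no root index.html is emitted. A local sanity check might look like this — a sketch, where the output directory and the presence of a book.toml at the repository root are assumptions:

```sh
# Build the book with the same mdbook version the workflow pins, then check for the index.
mdbook build
test -f book/index.html && echo "index.html generated"
```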
.github/workflows/release-artifacts.yml:

```diff
@@ -31,7 +31,7 @@ jobs:
           # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
           dists='["debian:sid"]'
           if [[ $GITHUB_REF == refs/tags/* ]]; then
-            dists=$(scripts-dev/build_debian_packages --show-dists-json)
+            dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
           fi
           echo "::set-output name=distros::$dists"
     # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
        # see https://github.com/docker/build-push-action/issues/252
        # for the cache magic here
        run: |
-         ./src/scripts-dev/build_debian_packages \
+         ./src/scripts-dev/build_debian_packages.py \
            --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
            --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
            --docker-build-arg=--progress=plain \
@@ -112,7 +112,8 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           files: |
-            python-dist/*
+            Sdist/*
+            Wheel/*
            debs.tar.xz
           # if it's not already published, keep the release as a draft.
           draft: true
```
.github/workflows/tests.yml:

+20 -45

```diff
@@ -16,7 +16,8 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
       - run: pip install -e .
-      - run: scripts-dev/generate_sample_config --check
+      - run: scripts-dev/generate_sample_config.sh --check
+      - run: scripts-dev/config-lint.sh

   lint:
     runs-on: ubuntu-latest
@@ -26,7 +27,6 @@ jobs:
           - "check_codestyle"
           - "check_isort"
           - "mypy"
-          - "packaging"

     steps:
       - uses: actions/checkout@v2
@@ -51,7 +51,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment.sh
         env:
           PULL_REQUEST_NUMBER: ${{ github.event.number }}

@@ -128,6 +128,7 @@ jobs:
           || true

   trial-olddeps:
+    # Note: sqlite only; no postgres
     if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
@@ -135,11 +136,11 @@ jobs:
       - uses: actions/checkout@v2
       - name: Test with old deps
         uses: docker://ubuntu:focal # For old python and sqlite
+        # Note: focal seems to be using 3.8, but the oldest is 3.7?
+        # See https://github.com/matrix-org/synapse/issues/12343
         with:
           workdir: /github/workspace
           entrypoint: .ci/scripts/test_old_deps.sh
-        env:
-          TRIAL_FLAGS: "--jobs=2"
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}
@@ -270,9 +271,10 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - run: sudo apt-get -qq install xmlsec1
-      - uses: actions/setup-python@v2
+      - uses: matrix-org/setup-python-poetry@v1
         with:
-          python-version: "3.9"
+          python-version: ${{ matrix.python-version }}
+          extras: "postgres"
       - run: .ci/scripts/test_export_data_command.sh

   portdb:
@@ -307,9 +309,10 @@ jobs:
     steps:
       - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
-      - uses: actions/setup-python@v2
+      - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.python-version }}
+          extras: "postgres"
       - run: .ci/scripts/test_synapse_port_db.sh

   complement:
@@ -361,60 +364,32 @@ jobs:
             (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
           done

-      # Build initial Synapse image
-      - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
-        working-directory: synapse
-        env:
-          DOCKER_BUILDKIT: 1
-
-      # Build a ready-to-run Synapse image based on the initial image above.
-      # This new image includes a config file, keys for signing and TLS, and
-      # other settings to make it suitable for testing under Complement.
-      - run: docker build -t complement-synapse -f Synapse.Dockerfile .
-        working-directory: complement/dockerfiles
-
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
-        env:
-          COMPLEMENT_BASE_IMAGE: complement-synapse:latest
         working-directory: complement

   # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
   tests-done:
     if: ${{ always() }}
     needs:
       - check-sampleconfig
       - lint
       - lint-crlf
       - lint-newsfile
       - trial
       - trial-olddeps
       - sytest
       - export-data
       - portdb
       - complement
     runs-on: ubuntu-latest
     steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
-        run: |
-          rc=0
-          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
-          while read job result ; do
-            # The newsfile lint may be skipped on non PR builds
-            if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
-              continue
-            fi
+      - uses: matrix-org/done-action@v2
+        with:
+          needs: ${{ toJSON(needs) }}
-            if [ "$result" != "success" ]; then
-              echo "::set-failed ::Job $job returned $result"
-              rc=1
-            fi
-          done <<< $results
-          exit $rc
+          # The newsfile lint may be skipped on non PR builds
+          skippable:
+            lint-newsfile
```
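For context on the removed step: a sketch of what the `jq` incantation consumes and emits, using a hypothetical two-job `needs` context:

```sh
# Shaped like GitHub's toJSON(needs) output; the job names here are made up.
NEEDS_CONTEXT='{"lint":{"result":"success"},"trial":{"result":"failure"}}'
jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< "$NEEDS_CONTEXT"
# prints:
#   lint success
#   trial failure
```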
.gitignore:

```diff
@@ -15,6 +15,10 @@ _trial_temp*/
 .DS_Store
 __pycache__/

+# We do want the poetry lockfile. TODO: is there a good reason for ignoring
+# '*.lock' above? If not, let's nuke it.
+!poetry.lock
+
 # stuff that is likely to exist when you run a server locally
 /*.db
 /*.log
@@ -30,6 +34,9 @@ __pycache__/
 /media_store/
 /uploads

+# For direnv users
+/.envrc
+
 # IDEs
 /.idea/
 /.ropeproject/
```
+351 -9394 (file diff suppressed because it is too large)
MANIFEST.in:

```diff
@@ -1,4 +1,3 @@
-include synctl
 include LICENSE
 include VERSION
 include *.rst
@@ -17,7 +16,6 @@ recursive-include synapse/storage *.txt
 recursive-include synapse/storage *.md

 recursive-include docs *
-recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
@@ -53,5 +51,4 @@ prune contrib
 prune debian
-prune demo/etc
 prune docker
 prune snap
 prune stubs
```
README.rst:

```diff
@@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports

 (to stop, you can use `./demo/stop.sh`)

+See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+for more information.
+
 If you just want to start a single instance of the app and run it directly::

     # Create the homeserver.yaml config once
```
A new changelog.d newsfragment:

```diff
@@ -0,0 +1 @@
+Optimise room backfill, to reduce memory usage.
```
contrib/jitsimeetbridge/jitsimeetbridge.py:

```diff
@@ -193,12 +193,15 @@ class TrivialXmppClient:
         time.sleep(7)
         print("SSRC spammer started")
         while self.running:
-            ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
-                "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
-                "nick": self.userId,
-                "assrc": self.ssrcs["audio"],
-                "vssrc": self.ssrcs["video"],
-            }
+            ssrcMsg = (
+                "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
+                % {
+                    "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
+                    "nick": self.userId,
+                    "assrc": self.ssrcs["audio"],
+                    "vssrc": self.ssrcs["video"],
+                }
+            )
             res = self.sendIq(ssrcMsg)
             print("reply from ssrc announce: ", res)
             time.sleep(10)
```
snap/snapcraft.yaml:

```diff
@@ -20,7 +20,7 @@ apps:
     generate-config:
         command: generate_config
     generate-signing-key:
-        command: generate_signing_key.py
+        command: generate_signing_key
    register-new-matrix-user:
         command: register_new_matrix_user
         plugs: [network]
```
debian/changelog (vendored):

+60

```diff
@@ -1,3 +1,63 @@
+matrix-synapse-py3 (1.57.1) stable; urgency=medium
+
+  * New synapse release 1.57.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Apr 2022 15:27:21 +0100
+
+matrix-synapse-py3 (1.57.0) stable; urgency=medium
+
+  * New synapse release 1.57.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Apr 2022 10:58:42 +0100
+
+matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.57.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Apr 2022 13:36:25 +0100
+
+matrix-synapse-py3 (1.56.0) stable; urgency=medium
+
+  * New synapse release 1.56.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Apr 2022 12:38:39 +0100
+
+matrix-synapse-py3 (1.56.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.56.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Mar 2022 10:40:50 +0100
+
+matrix-synapse-py3 (1.55.2) stable; urgency=medium
+
+  * New synapse release 1.55.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 19:07:11 +0000
+
+matrix-synapse-py3 (1.55.1) stable; urgency=medium
+
+  * New synapse release 1.55.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 17:44:23 +0000
+
+matrix-synapse-py3 (1.55.0) stable; urgency=medium
+
+  * New synapse release 1.55.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Mar 2022 13:59:26 +0000
+
+matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.55.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
+
+matrix-synapse-py3 (1.54.0) stable; urgency=medium
+
+  * New synapse release 1.54.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Mar 2022 10:54:52 +0000
+
 matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium

   * New synapse release 1.54.0~rc1.
```
demo/.gitignore:

+4 -7

```diff
@@ -1,7 +1,4 @@
-*.db
-*.log
-*.log.*
-*.pid
-
-/media_store.*
-/etc
+# Ignore all the temporary files from the demo servers.
+8080/
+8081/
+8082/
```
demo/README (deleted):

-26

```diff
@@ -1,26 +0,0 @@
-DO NOT USE THESE DEMO SERVERS IN PRODUCTION
-
-Requires you to have done:
-    python setup.py develop
-
-
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
-
-To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
-and are configured in a highly insecure way. Do not use these configuration files in production.
-
-stop.sh will stop the synapse servers and the webclient.
-
-clean.sh will delete the databases and log files.
-
-To start a completely new set of servers, run:
-
-    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
-
-
-Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
-
-
-
-Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.
```
demo/clean.sh:

```diff
@@ -4,6 +4,9 @@ set -e

 DIR="$( cd "$( dirname "$0" )" && pwd )"

+# Ensure that the servers are stopped.
+$DIR/stop.sh
+
 PID_FILE="$DIR/servers.pid"

 if [ -f "$PID_FILE" ]; then
```
demo/start.sh:

+34 -36

```diff
@@ -6,8 +6,6 @@ CWD=$(pwd)

 cd "$DIR/.." || exit

-mkdir -p demo/etc
-
 PYTHONPATH=$(readlink -f "$(pwd)")
 export PYTHONPATH

@@ -21,22 +19,27 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit

-    #rm $DIR/etc/$port.config
+    # Generate the configuration for the homeserver at localhost:848x.
     python3 -m synapse.app.homeserver \
         --generate-config \
-        -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
+        --server-name "localhost:$port" \
+        --config-path "$port.config" \
         --report-stats no

-    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
-        # Generate tls keys
-        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
+        # Generate TLS keys.
+        openssl req -x509 -newkey rsa:4096 \
+            -keyout "localhost:$port.tls.key" \
+            -out "localhost:$port.tls.crt" \
+            -days 365 -nodes -subj "/O=matrix"

-        # Regenerate configuration
+        # Add customisations to the configuration.
         {
-            printf '\n\n# Customisation made by demo/start.sh\n'
+            printf '\n\n# Customisation made by demo/start.sh\n\n'
             echo "public_baseurl: http://localhost:$port/"
             echo 'enable_registration: true'
+            echo 'enable_registration_without_verification: true'
             echo ''

             # Warning, this heredoc depends on the interaction of tabs and spaces.
             # Please don't accidentally bork me with your fancy settings.
@@ -63,38 +66,34 @@ for port in 8080 8081 8082; do

             echo "${listeners}"

-            # Disable tls for the servers
-            printf '\n\n# Disable tls on the servers.'
+            # Disable TLS for the servers
+            printf '\n\n# Disable TLS for the servers.'
             echo '# DO NOT USE IN PRODUCTION'
             echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
             echo 'federation_verify_certificates: false'

-            # Set tls paths
-            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
+            # Set paths for the TLS certificates.
+            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""

             # Ignore keys from the trusted keys server
             echo '# Ignore keys from the trusted keys server'
             echo 'trusted_key_servers:'
             echo '  - server_name: "matrix.org"'
             echo '    accept_keys_insecurely: true'
             echo ''

-            # Reduce the blacklist
-            blacklist=$(cat <<-BLACK
-                # Set the blacklist so that it doesn't include 127.0.0.1, ::1
-                federation_ip_range_blacklist:
-                - '10.0.0.0/8'
-                - '172.16.0.0/12'
-                - '192.168.0.0/16'
-                - '100.64.0.0/10'
-                - '169.254.0.0/16'
-                - 'fe80::/64'
-                - 'fc00::/7'
-            BLACK
+            # Allow the servers to communicate over localhost.
+            allow_list=$(cat <<-ALLOW_LIST
+                # Allow the servers to communicate over localhost.
+                ip_range_whitelist:
+                - '127.0.0.1/8'
+                - '::1/128'
+            ALLOW_LIST
             )

-            echo "${blacklist}"
-        } >> "$DIR/etc/$port.config"
+            echo "${allow_list}"
+        } >> "$port.config"
     fi

     # Check script parameters
@@ -141,19 +140,18 @@ for port in 8080 8081 8082; do
             burst_count: 1000
         RC
         )
-        echo "${ratelimiting}" >> "$DIR/etc/$port.config"
+        echo "${ratelimiting}" >> "$port.config"
         fi
     fi

-    if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
-        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
-    fi
-    if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
-        echo "report_stats: false" >> "$DIR/etc/$port.config"
+    # Always disable reporting of stats if the option is not there.
+    if ! grep -F "report_stats" -q "$port.config" ; then
+        echo "report_stats: false" >> "$port.config"
     fi

+    # Run the homeserver in the background.
     python3 -m synapse.app.homeserver \
-        --config-path "$DIR/etc/$port.config" \
+        --config-path "$port.config" \
         -D \

     popd || exit
```
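To see what the `openssl req` invocation above produced, a generated certificate can be inspected after a first run — a sketch, where the path assumes the 8080 demo server has been started at least once:

```sh
# Print the subject and validity window of the self-signed demo certificate.
openssl x509 -in "demo/8080/localhost:8080.tls.crt" -noout -subject -dates
```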
docker/Dockerfile:

+60 -22

```diff
@@ -1,3 +1,4 @@
+# syntax=docker/dockerfile:experimental
 # Dockerfile to build the matrixdotorg/synapse docker images.
 #
 # Note that it uses features which are only available in BuildKit - see
@@ -14,20 +15,61 @@
 #    DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
 #

+# Irritatingly, there is no blessed guide on how to distribute an application with its
+# poetry-managed environment in a docker image. We have opted for
+# `poetry export | pip install -r /dev/stdin`, but there are known bugs
+# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
+# In case we get bitten by those bugs in the future, the recommendations here might
+# be useful:
+#     https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
+#     https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc

 ARG PYTHON_VERSION=3.9

 ###
-### Stage 0: builder
+### Stage 0: generate requirements.txt
 ###
+FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
+
+# RUN --mount is specific to buildkit and is documented at
+# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
+# Here we use it to set up a cache for apt (and below for pip), to improve
+# rebuild speeds on slow connections.
+RUN \
+   --mount=type=cache,target=/var/cache/apt,sharing=locked \
+   --mount=type=cache,target=/var/lib/apt,sharing=locked \
+   apt-get update && apt-get install -y git \
+   && rm -rf /var/lib/apt/lists/*
+
+# We install poetry in its own build stage to avoid its dependencies conflicting with
+# synapse's dependencies.
+# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
+# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
+# https://github.com/python-poetry/poetry/pull/5156 and
+# https://github.com/python-poetry/poetry/issues/5141 ;
+# without it, we generate a requirements.txt with incorrect environment markers,
+# which causes necessary packages to be omitted when we `pip install`.
+#
+# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
+# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
+RUN --mount=type=cache,target=/root/.cache/pip \
+  pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
+
+WORKDIR /synapse
+
+# Copy just what we need to run `poetry export`...
+COPY pyproject.toml poetry.lock README.rst /synapse/
+
+RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
+
+###
+### Stage 1: builder
+###
 FROM docker.io/python:${PYTHON_VERSION}-slim as builder

 # install the OS build deps
 #
 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
 # Here we use it to set up a cache for apt, to improve rebuild speeds on
 # slow connections.
 #
 RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -45,31 +87,27 @@ RUN \
     zlib1g-dev \
     && rm -rf /var/lib/apt/lists/*

-# Copy just what we need to pip install
-COPY scripts /synapse/scripts/
-COPY MANIFEST.in README.rst setup.py synctl /synapse/
-COPY synapse/__init__.py /synapse/synapse/__init__.py
-COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
-
 # To speed up rebuilds, install all of the dependencies before we copy over
-# the whole synapse project so that we this layer in the Docker cache can be
+# the whole synapse project, so that this layer in the Docker cache can be
 # used while you develop on the source
 #
-# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
+# This is aiming at installing the `[tool.poetry.dependencies]` from pyproject.toml.
+COPY --from=requirements /synapse/requirements.txt /synapse/
 RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install --prefix="/install" --no-warn-script-location \
-  /synapse[all]
+  pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt

-# Copy over the rest of the project
+# Copy over the rest of the synapse source code.
 COPY synapse /synapse/synapse/
+# ... and what we need to `pip install`.
+# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
+# pyproject.toml here, ditching setup.py and MANIFEST.in.
+COPY setup.py MANIFEST.in README.rst /synapse/

-# Install the synapse package itself and all of its children packages.
-#
-# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
+# Install the synapse package itself.
 RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse

 ###
-### Stage 1: runtime
+### Stage 2: runtime
 ###

 FROM docker.io/python:${PYTHON_VERSION}-slim
```
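The `poetry export | pip install` approach the comments describe can be reproduced outside Docker — a sketch run from a Synapse checkout, where `--no-deps` mirrors the Dockerfile and trusts the lockfile to have done the resolution:

```sh
poetry export --extras all -o requirements.txt
pip install --no-deps --no-warn-script-location -r requirements.txt
pip install --no-deps .  # then the synapse package itself
```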
docker/Dockerfile-pgtests (deleted):

```diff
@@ -1,30 +0,0 @@
-# Use the Sytest image that comes with a lot of the build dependencies
-# pre-installed
-FROM matrixdotorg/sytest:focal
-
-# The Sytest image doesn't come with python, so install that
-RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
-
-# We need tox to run the tests in run_pg_tests.sh
-RUN python3 -m pip install tox
-
-# Initialise the db
-RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
-
-# Add a user with our UID and GID so that files get created on the host owned
-# by us, not root.
-ARG UID
-ARG GID
-RUN groupadd --gid $GID user
-RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
-
-# Ensure we can start postgres by sudo-ing as the postgres user.
-RUN apt-get update && apt-get -qq install -y sudo
-RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-ADD run_pg_tests.sh /run_pg_tests.sh
-# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
-# so that we can `docker run` this container and pass arguments to pg_tests.sh
-ENTRYPOINT ["/run_pg_tests.sh"]
-
-USER user
```
docker/Dockerfile-workers:

```diff
@@ -14,9 +14,6 @@ COPY ./docker/conf-workers/* /conf/
 # Expose nginx listener port
 EXPOSE 8080/tcp

-# Volume for user-editable config files, logs etc.
-VOLUME ["/data"]
-
 # A script to read environment variables and create the necessary
 # files to run the desired worker configuration. Will start supervisord.
 COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
```
docker/README-testing.md:

+22 -24

@@ -10,10 +10,10 @@ Note that running Synapse's unit tests from within the docker image is not suppo

## Testing with SQLite and single-process Synapse

> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process of Synapse against Complement.

The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.

Start by building the base Synapse docker image. If you wish to run tests with the latest
@@ -26,23 +26,22 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .

This will build an image with the tag `matrixdotorg/synapse`.

-Next, build the Synapse image for Complement. You will need a local checkout
-of Complement. Change to the root of your Complement checkout and run:
+Next, build the Synapse image for Complement.

```sh
-docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
+docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
```

This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.

## Testing with PostgreSQL and single or multi-process Synapse

The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.

As with the single-process image, build the base Synapse docker image. If you wish to run
@@ -55,7 +54,7 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .

This will build an image with the tag `matrixdotorg/synapse`.

Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:

```sh
@@ -64,21 +63,20 @@ docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .

This will build an image with the tag `matrixdotorg/synapse-workers`.

It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.

Finally, build the Synapse image for Complement, which is based on
-`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
-the root of your Complement checkout and run:
+`matrixdotorg/synapse-workers`.

```sh
-docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
+docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
```

-This will build an image with the tag `complement-synapse`, which can be handed to
+This will build an image with the tag `complement-synapse-workers`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
@@ -91,10 +89,10 @@ bundling all necessary components together for a workerised homeserver instance.

This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.

Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:

@@ -112,7 +110,7 @@ docker run -d --name synapse \
    matrixdotorg/synapse-workers
```

...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).

The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
@@ -130,11 +128,11 @@ Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no
(leaving only the main process). The container is configured to use redis-based worker
mode.

Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.

Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
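Once either image is built, handing it to Complement follows Complement's documented usage. As a sketch, run from the root of a Complement checkout:

```sh
# COMPLEMENT_BASE_IMAGE is the variable named in the text above.
COMPLEMENT_BASE_IMAGE=complement-synapse:latest go test -v ./tests/...
```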
docker/complement/Dockerfile (new file):

```diff
@@ -0,0 +1,22 @@
+# A dockerfile which builds an image suitable for testing Synapse under
+# complement.
+
+ARG SYNAPSE_VERSION=latest
+
+FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
+
+ENV SERVER_NAME=localhost
+
+COPY conf/* /conf/
+
+# generate a signing key
+RUN generate_signing_key -o /conf/server.signing.key
+
+WORKDIR /data
+
+EXPOSE 8008 8448
+
+ENTRYPOINT ["/conf/start.sh"]
+
+HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
+    CMD curl -fSs http://localhost:8008/health || exit 1
```
docker/complement/README.md (new file):

```diff
@@ -0,0 +1 @@
+Stuff for building the docker image used for testing under complement.
```
docker/complement/SynapseWorkers.Dockerfile (new file):

```diff
@@ -0,0 +1,73 @@
+# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
+# by including a built-in postgres instance, as well as setting up the homeserver so
+# that it is ready for testing via Complement.
+#
+# Instructions for building this image from those it depends on is detailed in this guide:
+# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
+FROM matrixdotorg/synapse-workers
+
+# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
+# custom CA.
+# We include this near the top of the file in order to cache the result.
+RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
+    tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
+
+# Install postgresql
+RUN apt-get update
+RUN apt-get install -y postgresql
+
+# Configure a user and create a database for Synapse
+RUN pg_ctlcluster 13 main start && su postgres -c "echo \
+    \"ALTER USER postgres PASSWORD 'somesecret'; \
+    CREATE DATABASE synapse \
+    ENCODING 'UTF8' \
+    LC_COLLATE='C' \
+    LC_CTYPE='C' \
+    template=template0;\" | psql" && pg_ctlcluster 13 main stop
+
+# Modify the shared homeserver config with postgres support, certificate setup
+# and the disabling of rate-limiting
+COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
+
+WORKDIR /data
+
+# Copy the caddy config
+COPY conf-workers/caddy.complement.json /root/caddy.json
+
+# Expose caddy's listener ports
+EXPOSE 8008 8448
+
+ENTRYPOINT \
+    # Replace the server name in the caddy config
+    sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
+    # Start postgres
+    pg_ctlcluster 13 main start 2>&1 && \
+    # Start caddy
+    /root/caddy start --config /root/caddy.json 2>&1 && \
+    # Set the server name of the homeserver
+    SYNAPSE_SERVER_NAME=${SERVER_NAME} \
+    # No need to report stats here
+    SYNAPSE_REPORT_STATS=no \
+    # Set postgres authentication details which will be placed in the homeserver config file
+    POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
+    # Specify the workers to test with
+    SYNAPSE_WORKER_TYPES="\
+        event_persister, \
+        event_persister, \
+        background_worker, \
+        frontend_proxy, \
+        event_creator, \
+        user_dir, \
+        media_repository, \
+        federation_inbound, \
+        federation_reader, \
+        federation_sender, \
+        synchrotron, \
+        appservice, \
+        pusher" \
+    # Run the script that writes the necessary config files and starts supervisord, which in turn
+    # starts everything else
+    /configure_workers_and_start.py
+
+HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
+    CMD /bin/sh /healthcheck.sh
```
docker/complement/conf-workers/caddy.complement.json (new file):

```diff
@@ -0,0 +1,72 @@
+{
+  "apps": {
+    "http": {
+      "servers": {
+        "srv0": {
+          "listen": [
+            ":8448"
+          ],
+          "routes": [
+            {
+              "match": [
+                {
+                  "host": [
+                    "{{ server_name }}"
+                  ]
+                }
+              ],
+              "handle": [
+                {
+                  "handler": "subroute",
+                  "routes": [
+                    {
+                      "handle": [
+                        {
+                          "handler": "reverse_proxy",
+                          "upstreams": [
+                            {
+                              "dial": "localhost:8008"
+                            }
+                          ]
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ],
+              "terminal": true
+            }
+          ]
+        }
+      }
+    },
+    "tls": {
+      "automation": {
+        "policies": [
+          {
+            "subjects": [
+              "{{ server_name }}"
+            ],
+            "issuers": [
+              {
+                "module": "internal"
+              }
+            ],
+            "on_demand": true
+          }
+        ]
+      }
+    },
+    "pki": {
+      "certificate_authorities": {
+        "local": {
+          "name": "Complement CA",
+          "root": {
+            "certificate": "/complement/ca/ca.crt",
+            "private_key": "/complement/ca/ca.key"
+          }
+        }
+      }
+    }
+  }
+}
```
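Because the config above contains a `{{ server_name }}` placeholder, it is not valid for caddy until the entrypoint's `sed` has run. A sketch of checking the substituted result by hand (the server name here is hypothetical):

```sh
sed 's/{{ server_name }}/example.com/g' caddy.complement.json > /tmp/caddy.json
caddy validate --config /tmp/caddy.json
```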
docker/complement/conf-workers/workers-shared.yaml (new file):

```diff
@@ -0,0 +1,72 @@
+## Server ##
+report_stats: False
+trusted_key_servers: []
+enable_registration: true
+enable_registration_without_verification: true
+bcrypt_rounds: 4
+
+## Federation ##
+
+# trust certs signed by Complement's CA
+federation_custom_ca_list:
+- /complement/ca/ca.crt
+
+# unblacklist RFC1918 addresses
+federation_ip_range_blacklist: []
+
+# Disable server rate-limiting
+rc_federation:
+  window_size: 1000
+  sleep_limit: 10
+  sleep_delay: 500
+  reject_limit: 99999
+  concurrent: 3
+
+rc_message:
+  per_second: 9999
+  burst_count: 9999
+
+rc_registration:
+  per_second: 9999
+  burst_count: 9999
+
+rc_login:
+  address:
+    per_second: 9999
+    burst_count: 9999
+  account:
+    per_second: 9999
+    burst_count: 9999
+  failed_attempts:
+    per_second: 9999
+    burst_count: 9999
+
+rc_admin_redaction:
+  per_second: 9999
+  burst_count: 9999
+
+rc_joins:
+  local:
+    per_second: 9999
+    burst_count: 9999
+  remote:
+    per_second: 9999
+    burst_count: 9999
+
+federation_rr_transactions_per_room_per_second: 9999
+
+## Experimental Features ##
+
+experimental_features:
+  # Enable history backfilling support
+  msc2716_enabled: true
+  # Enable spaces support
+  spaces_enabled: true
+  # Enable jump to date endpoint
+  msc3030_enabled: true
+
+server_notices:
+  system_mxid_localpart: _server
+  system_mxid_display_name: "Server Alert"
+  system_mxid_avatar_url: ""
+  room_name: "Server Alert"
```
docker/complement/conf/homeserver.yaml (new file):

```diff
@@ -0,0 +1,115 @@
+## Server ##
+
+server_name: SERVER_NAME
+log_config: /conf/log_config.yaml
+report_stats: False
+signing_key_path: /conf/server.signing.key
+trusted_key_servers: []
+enable_registration: true
+enable_registration_without_verification: true
+
+## Listeners ##
+
+tls_certificate_path: /conf/server.tls.crt
+tls_private_key_path: /conf/server.tls.key
+bcrypt_rounds: 4
+registration_shared_secret: complement
+
+listeners:
+  - port: 8448
+    bind_addresses: ['::']
+    type: http
+    tls: true
+    resources:
+      - names: [federation]
+
+  - port: 8008
+    bind_addresses: ['::']
+    type: http
+
+    resources:
+      - names: [client]
+
+## Database ##
+
+database:
+  name: "sqlite3"
+  args:
+    # We avoid /data, as it is a volume and is not transferred when the container is committed,
+    # which is a fundamental necessity in complement.
+    database: "/conf/homeserver.db"
+
+## Federation ##
+
+# trust certs signed by the complement CA
+federation_custom_ca_list:
+- /complement/ca/ca.crt
+
+# unblacklist RFC1918 addresses
+ip_range_blacklist: []
+
+# Disable server rate-limiting
+rc_federation:
+  window_size: 1000
+  sleep_limit: 10
+  sleep_delay: 500
+  reject_limit: 99999
+  concurrent: 3
+
+rc_message:
+  per_second: 9999
+  burst_count: 9999
+
+rc_registration:
+  per_second: 9999
+  burst_count: 9999
+
+rc_login:
+  address:
+    per_second: 9999
+    burst_count: 9999
+  account:
+    per_second: 9999
+    burst_count: 9999
+  failed_attempts:
+    per_second: 9999
+    burst_count: 9999
+
+rc_admin_redaction:
+  per_second: 9999
+  burst_count: 9999
+
+rc_joins:
+  local:
+    per_second: 9999
+    burst_count: 9999
+  remote:
+    per_second: 9999
+    burst_count: 9999
+
+federation_rr_transactions_per_room_per_second: 9999
+
+## API Configuration ##
+
+# A list of application service config files to use
+#
+app_service_config_files:
+AS_REGISTRATION_FILES
+
+## Experimental Features ##
+
+experimental_features:
+  # Enable spaces support
+  spaces_enabled: true
+  # Enable history backfilling support
+  msc2716_enabled: true
+  # server-side support for partial state in /send_join
+  msc3706_enabled: true
+  # Enable jump to date endpoint
+  msc3030_enabled: true
+
+server_notices:
+  system_mxid_localpart: _server
+  system_mxid_display_name: "Server Alert"
+  system_mxid_avatar_url: ""
+  room_name: "Server Alert"
```
@@ -0,0 +1,24 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]
    # log to stdout, for easier use with 'docker logs'
    stream: 'ext://sys.stdout'

root:
  level: INFO
  handlers: [console]

disable_existing_loggers: false
Executable
+30
@@ -0,0 +1,30 @@
#!/bin/sh

set -e

sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml

# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
  [ -f "$filename" ] || break

  as_id=$(basename "$filename" .yaml)

  # Insert the path to the registration file, keeping the AS_REGISTRATION_FILES marker
  # after it so we can add the next application service in the next iteration of this for loop
  sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml

# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048

openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
  -subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
  -out /conf/server.tls.crt

exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"
@@ -29,6 +29,7 @@
 import os
 import subprocess
 import sys
+from typing import Any, Dict, Set

 import jinja2
 import yaml
@@ -36,7 +37,7 @@ import yaml
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080


-WORKERS_CONFIG = {
+WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "pusher": {
         "app": "synapse.app.pusher",
         "listener_resources": [],
@@ -307,7 +308,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):

     Args:
         environ: _Environ[str]
-        config_path: Where to output the generated Synapse main worker config file.
+        config_path: The location of the generated Synapse main worker config file.
         data_dir: The location of the synapse data directory. Where log and
             user-facing config files live.
     """
@@ -320,7 +321,8 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
    # and adding a replication listener.

    # First read the original config file and extract the listeners block. Then we'll add
-   # another listener for replication. Later we'll write out the result.
+   # another listener for replication. Later we'll write out the result to the shared
+   # config file.
    listeners = [
        {
            "port": 9093,
@@ -355,7 +357,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
    #   worker_type: {1234, 1235, ...}}
    # }
    # and will be used to construct 'upstream' nginx directives.
-   nginx_upstreams = {}
+   nginx_upstreams: Dict[str, Set[int]] = {}

    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
    # placed after the proxy_pass directive. The main benefit to representing this data as a
@@ -384,7 +386,11 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
    # A counter of worker_type -> int. Used for determining the name for a given
    # worker type when generating its config file, as each worker's name is just
    # worker_type + instance #
-   worker_type_counter = {}
+   worker_type_counter: Dict[str, int] = {}
+
+   # A list of internal endpoints to healthcheck, starting with the main process
+   # which exists even if no workers do.
+   healthcheck_urls = ["http://localhost:8080/health"]

    # For each worker type specified by the user, create config values
    for worker_type in worker_types:
@@ -404,12 +410,14 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
        # e.g. federation_reader1
        worker_name = worker_type + str(new_worker_count)
        worker_config.update(
-           {"name": worker_name, "port": worker_port, "config_path": config_path}
+           {"name": worker_name, "port": str(worker_port), "config_path": config_path}
        )

        # Update the shared config with any worker-type specific options
        shared_config.update(worker_config["shared_extra_conf"])

+       healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+
        # Check if more than one instance of this worker type has been specified
        worker_type_total_count = worker_types.count(worker_type)
        if worker_type_total_count > 1:
@@ -475,15 +483,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
    # Determine the load-balancing upstreams to configure
    nginx_upstream_config = ""

-   # At the same time, prepare a list of internal endpoints to healthcheck
-   # starting with the main process which exists even if no workers do.
-   healthcheck_urls = ["http://localhost:8080/health"]
-
    for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
        body = ""
        for port in upstream_worker_ports:
            body += " server localhost:%d;\n" % (port,)
-           healthcheck_urls.append("http://localhost:%d/health" % (port,))

        # Add to the list of configured upstreams
        nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-# This script runs the PostgreSQL tests inside a Docker container. It expects
-# the relevant source files to be mounted into /src (done automatically by the
-# caller script). It will set up the database, run it, and then use the tox
-# configuration to run the tests.
-
-set -e
-
-# Set PGUSER so Synapse's tests know what user to connect to the database with
-export PGUSER=postgres
-
-# Start the database
-sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
-
-# Run the tests
-cd /src
-export TRIAL_FLAGS="-j 4"
-tox --workdir=./.tox-pg-container -e py37-postgres "$@"
+5
-5
@@ -108,7 +108,7 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):

    # Hopefully we already have a signing key, but generate one if not.
    args = [
-       "python",
+       sys.executable,
        "-m",
        "synapse.app.homeserver",
        "--config-path",
@@ -158,7 +158,7 @@ def run_generate_config(environ, ownership):

    # generate the main config file, and a signing key.
    args = [
-       "python",
+       sys.executable,
        "-m",
        "synapse.app.homeserver",
        "--server-name",
@@ -175,7 +175,7 @@ def run_generate_config(environ, ownership):
        "--open-private-ports",
    ]
    # log("running %s" % (args, ))
-   os.execv("/usr/local/bin/python", args)
+   os.execv(sys.executable, args)


 def main(args, environ):
@@ -254,12 +254,12 @@ running with 'migrate_config'. See the README for more details.

    log("Starting synapse with args " + " ".join(args))

-   args = ["python"] + args
+   args = [sys.executable] + args
    if ownership is not None:
        args = ["gosu", ownership] + args
        os.execve("/usr/sbin/gosu", args, environ)
    else:
-       os.execve("/usr/local/bin/python", args, environ)
+       os.execve(sys.executable, args, environ)


 if __name__ == "__main__":
@@ -45,6 +45,7 @@
 - [Account validity callbacks](modules/account_validity_callbacks.md)
 - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
 - [Background update controller callbacks](modules/background_update_controller_callbacks.md)
+- [Account data callbacks](modules/account_data_callbacks.md)
 - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
 - [Workers](workers.md)
 - [Using `synctl` with Workers](synctl_workers.md)
@@ -82,6 +83,7 @@
 - [Release Cycle](development/releases.md)
 - [Git Usage](development/git.md)
 - [Testing]()
+  - [Demo scripts](development/demo.md)
 - [OpenTracing](opentracing.md)
 - [Database Schemas](development/database_schema.md)
 - [Experimental features](development/experimental_features.md)
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1 @@
This directory contains changelogs for previous years.
+4
-6
@@ -24,7 +24,7 @@ pip install -e ".[lint,mypy]"
   functionality) with:

   ```sh
-  black . --exclude="\.tox|build|env"
+  black .
   ```

 - **flake8**
@@ -35,7 +35,7 @@ pip install -e ".[lint,mypy]"
   Check all application and test code with:

   ```sh
-  flake8 synapse tests
+  flake8 .
   ```

 - **isort**
@@ -46,11 +46,9 @@ pip install -e ".[lint,mypy]"
   Auto-fix imports with:

   ```sh
-  isort -rc synapse tests
+  isort .
   ```

-  `-rc` means to recursively search the given directories.
-
   It's worth noting that modern IDEs and text editors can run these tools
   automatically on save. It may be worth looking into whether this
   functionality is supported in your editor for a more convenient
@@ -172,6 +170,6 @@ frobber:
 ```

 Note that the sample configuration is generated from the synapse code
-and is maintained by a script, `scripts-dev/generate_sample_config`.
+and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
 Making sure that the output from this script matches the desired format
 is left as an exercise for the reader!
@@ -206,9 +206,10 @@ To do so, [configure Postgres](../postgres.md) and run `trial` with the
 following environment variables matching your configuration:

 - `SYNAPSE_POSTGRES` to anything nonempty
-- `SYNAPSE_POSTGRES_HOST`
-- `SYNAPSE_POSTGRES_USER`
-- `SYNAPSE_POSTGRES_PASSWORD`
+- `SYNAPSE_POSTGRES_HOST` (optional if it's the default: UNIX socket)
+- `SYNAPSE_POSTGRES_PORT` (optional if it's the default: 5432)
+- `SYNAPSE_POSTGRES_USER` (optional if using a UNIX socket)
+- `SYNAPSE_POSTGRES_PASSWORD` (optional if using a UNIX socket)

 For example:

@@ -220,26 +221,12 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
 trial
 ```

-#### Prebuilt container
+You don't need to specify the host, user, port or password if your Postgres
+server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
+works without further arguments).

-Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
-Docker container to set up PostgreSQL and run our tests for us. To do so, run
+Your Postgres account needs to be able to create databases.

-```shell
-scripts-dev/test_postgresql.sh
-```
-
-Any extra arguments to the script will be passed to `tox` and then to `trial`,
-so we can run a specific test in this container with e.g.
-
-```shell
-scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
-```
-
-The container creates a folder in your Synapse checkout called
-`.tox-pg-container` and uses this as a tox environment. The output of any
-`trial` runs goes into `_trial_temp` in your synapse source directory — the same
-as running `trial` directly on your host machine.

 ## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@@ -254,8 +241,14 @@ configuration:

 ```sh
 $ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
 ```
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

-This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
+This configuration should generally cover your needs.
+
+- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
+- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
+
+For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).


 ## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).

@@ -458,6 +451,17 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.

+### Private Sign off
+
+If you would like to provide your legal name privately to the Matrix.org
+Foundation (instead of in a public commit or comment), you can do so
+by emailing your legal name and a link to the pull request to
+[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
+It helps to include "sign off" or similar in the subject line. You will then
+be instructed further.
+
+Once private sign off is complete, doing so for future contributions will not
+be required.
+
 # 10. Turn feedback into better code.
@@ -158,9 +158,9 @@ same as integers.
 There are three separate aspects to this:

 * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
-  `scripts/synapse_port_db`. This tells the port script to cast the integer
-  value from SQLite to a boolean before writing the value to the postgres
-  database.
+  `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
+  the integer value from SQLite to a boolean before writing the value to the
+  postgres database.

 * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
   SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
@@ -0,0 +1,41 @@
# Synapse demo setup

**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**

Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).

The demo setup allows running three federating Synapse servers, with server
names `localhost:8080`, `localhost:8081`, and `localhost:8082`.

You can access them via any Matrix client over HTTP at `localhost:8080`,
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
`localhost:8481`, and `localhost:8482`.

To enable the servers to communicate, self-signed SSL certificates are generated
and the servers are configured in a highly insecure way, including:

* Not checking certificates over federation.
* Not verifying keys.

The servers are configured to store their data under `demo/8080`, `demo/8081`, and
`demo/8082`. This includes configuration, logs, SQLite databases, and media.

Note that when joining a public room on a different HS via "#foo:bar.net", then
you are (in the current impl) joining a room with room_id "foo". This means that
it won't work if your HS already has a room with that name.

## Using the demo scripts

There are three main scripts with straightforward purposes:

* `start.sh` will start the Synapse servers, generating any missing configuration.
  * This accepts a single parameter `--no-rate-limit` to "disable" rate limits
    (they actually still exist, but are very high).
* `stop.sh` will stop the Synapse servers.
* `clean.sh` will delete the configuration, databases, log files, etc.

To start a completely new set of servers, run:

```sh
./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
```
@@ -30,13 +30,58 @@ rather than skipping any that arrived late; whereas if you're looking at a
 historical section of timeline (i.e. `/messages`), you want to see the best
 representation of the state of the room as others were seeing it at the time.

+## Outliers
+
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.
+
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`. Since we don't have the state at any events fetched in that
+way, we mark them as outliers.
+
+So, typically, we won't have the `prev_events` of an `outlier` in the database
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:
+
+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
+
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremities` or `event_backward_extremities` tables.
+
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however there is an
+exception:
+
+### Out-of-band membership events
+
+A special case of outlier events are some membership events for federated rooms
+that we aren't full members of. For example:
+
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.
+
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.
+
 ## Forward extremity

-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)

-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.

 The "current state" of a room (ie: the state which would be used if we
 generated a new event) is, therefore, the resolution of the room states
 at each of the forward extremities.

 ## Backward extremity
@@ -44,23 +89,14 @@ The current marker of where we have backfilled up to and will generally be the
 `prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
 backfilling history.

-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
-
-
-## Outliers
-
-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
-
-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
-
-For example, when we fetch the event auth chain or state for a given event, we
-mark all of those claimed auth events as outliers because we haven't done the
-state calculation ourself.
+Note that, unlike forward extremities, we typically don't have any backward
+extremity events themselves in the database - or, if we do, they will be "outliers" (see
+above). Either way, we don't expect to have the room state at a backward extremity.
+
+When we persist a non-outlier event, if it was previously a backward extremity,
+we clear it as a backward extremity and set all of its `prev_events` as the new
+backward extremities if they aren't already persisted as non-outliers. This
+therefore keeps the backward extremities up-to-date.

 ## State groups

+2
-1
@@ -63,4 +63,5 @@ release of Synapse.

 If you want to get up and running quickly with a trio of homeservers in a
 private federation, there is a script in the `demo` directory. This is mainly
-useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/).
+useful just for development purposes. See
+[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html).
@@ -0,0 +1,106 @@
# Account data callbacks

Account data callbacks allow module developers to react to changes of the account data
of local users. Account data callbacks can be registered using the module API's
`register_account_data_callbacks` method.

## Callbacks

The available account data callbacks are:

### `on_account_data_updated`

_First introduced in Synapse v1.57.0_

```python
async def on_account_data_updated(
    user_id: str,
    room_id: Optional[str],
    account_data_type: str,
    content: "synapse.module_api.JsonDict",
) -> None:
```

Called after a user's account data has been updated. The module is given the
Matrix ID of the user whose account data is changing, the room ID the data is associated
with, the type associated with the change, as well as the new content. If the account
data is not associated with a specific room, then the room ID is `None`.

This callback is triggered when new account data is added or when the data associated with
a given type (and optionally room) changes. This includes deletion, since in Matrix,
deleting account data consists of replacing the data associated with a given type
(and optionally room) with an empty dictionary (`{}`).

Note that this doesn't trigger when changing the tags associated with a room, as these are
processed separately by Synapse.

If multiple modules implement this callback, Synapse runs them all in order.

## Example

The example below is a module that implements the `on_account_data_updated` callback, and
sends an event to an audit room when a user changes their account data.

```python
import json
import attr
from typing import Any, Dict, Optional

from synapse.module_api import JsonDict, ModuleApi
from synapse.module_api.errors import ConfigError


@attr.s(auto_attribs=True)
class CustomAccountDataConfig:
    audit_room: str
    sender: str


class CustomAccountDataModule:
    def __init__(self, config: CustomAccountDataConfig, api: ModuleApi):
        self.api = api
        self.config = config

        self.api.register_account_data_callbacks(
            on_account_data_updated=self.log_new_account_data,
        )

    @staticmethod
    def parse_config(config: Dict[str, Any]) -> CustomAccountDataConfig:
        def check_in_config(param: str):
            if param not in config:
                raise ConfigError(f"'{param}' is required")

        check_in_config("audit_room")
        check_in_config("sender")

        return CustomAccountDataConfig(
            audit_room=config["audit_room"],
            sender=config["sender"],
        )

    async def log_new_account_data(
        self,
        user_id: str,
        room_id: Optional[str],
        account_data_type: str,
        content: JsonDict,
    ) -> None:
        content_raw = json.dumps(content)
        msg_content = f"{user_id} has changed their account data for type {account_data_type} to: {content_raw}"

        if room_id is not None:
            msg_content += f" (in room {room_id})"

        await self.api.create_and_send_event_into_room(
            {
                "room_id": self.config.audit_room,
                "sender": self.config.sender,
                "type": "m.room.message",
                "content": {
                    "msgtype": "m.text",
                    "body": msg_content
                }
            }
        )
```
@@ -172,7 +172,7 @@ any of the subsequent implementations of this callback.
 _First introduced in Synapse v1.37.0_

 ```python
-async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
+async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool
 ```

 Called when computing search results in the user directory. The module must return a
@@ -182,9 +182,11 @@ search results; otherwise return `False`.

 The profile is represented as a dictionary with the following keys:

-* `user_id`: The Matrix ID for this user.
-* `display_name`: The user's display name.
-* `avatar_url`: The `mxc://` URL to the user's avatar.
+* `user_id: str`. The Matrix ID for this user.
+* `display_name: Optional[str]`. The user's display name, or `None` if this user
+  has not set a display name.
+* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None`
+  if this user has not set an avatar.

 The module is given a copy of the original dictionary, so modifying it from within the
 module cannot modify a user's profile when included in user directory search results.
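For illustration, a minimal sketch of a module using this callback. The class name and the blocking rule are invented for the example; `register_spam_checker_callbacks` is the module API's documented registration method.

```python
class BlockSupportImpersonation:
    """A hypothetical module hiding suspicious display names from the
    user directory (a sketch, not a real shipped module)."""

    def __init__(self, config: dict, api):
        self.api = api
        self.api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam,
        )

    async def check_username_for_spam(self, user_profile) -> bool:
        # Exclude users pretending to be support staff from search results.
        display_name = user_profile["display_name"] or ""
        return display_name.lower().startswith("synapse support")
```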
@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c

 If multiple modules implement this callback, Synapse runs them all in order.

+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+The module is passed two parameters, `user_id` which is the ID of the user being deactivated, and `by_admin` which is `True` if the request is made by a server admin, and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
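As a sketch, a module could implement both callbacks like this (the protected room and the policy are invented for the example; `register_third_party_rules_callbacks` is the module API's documented registration method):

```python
class ProtectedRoomsModule:
    """A hypothetical module refusing some shutdowns and deactivations."""

    PROTECTED_ROOM = "!announcements:example.com"  # invented room ID

    def __init__(self, config: dict, api):
        self.api = api
        self.api.register_third_party_rules_callbacks(
            check_can_shutdown_room=self.check_can_shutdown_room,
            check_can_deactivate_user=self.check_can_deactivate_user,
        )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Never allow the protected room to be shut down.
        return room_id != self.PROTECTED_ROOM

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Only allow deactivations performed by a server admin.
        return by_admin
```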

 ### `on_profile_update`

 _First introduced in Synapse v1.54.0_
@@ -204,6 +247,24 @@ admin API.

 If multiple modules implement this callback, Synapse runs them all in order.

+### `on_threepid_bind`
+
+_First introduced in Synapse v1.56.0_
+
+```python
+async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
+```
+
+Called after creating an association between a local user and a third-party identifier
+(email address, phone number). The module is given the Matrix ID of the user the
+association is for, as well as the medium (`email` or `msisdn`) and address of the
+third-party identifier.
+
+Note that this callback is _not_ called after a successful association on an _identity
+server_.
+
+If multiple modules implement this callback, Synapse runs them all in order.
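A minimal sketch of a module reacting to this callback (the module and its logging policy are invented for the example):

```python
import logging

logger = logging.getLogger(__name__)


class ThreepidAuditModule:
    """A hypothetical module logging each new third-party association."""

    def __init__(self, config: dict, api):
        self.api = api
        self.api.register_third_party_rules_callbacks(
            on_threepid_bind=self.on_threepid_bind,
        )

    async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
        # A real module might notify an external audit service instead.
        logger.info("%s bound %s threepid %s", user_id, medium, address)
```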

 ## Example

 The example below is a module that implements the third-party rules callback
@@ -33,7 +33,7 @@ A module can implement the following static method:

 ```python
 @staticmethod
-def parse_config(config: dict) -> dict
+def parse_config(config: dict) -> Any
 ```

 This method is given a dictionary resulting from parsing the YAML configuration for the
+3
-1
@@ -225,6 +225,8 @@ oidc_providers:
 3. Create an application for synapse in Authentik and link it to the provider.
 4. Note the slug of your application, Client ID and Client Secret.

+Note: RSA keys must be used for signing for Authentik, ECC keys do not work.
+
 Synapse config:
 ```yaml
 oidc_providers:
@@ -240,7 +242,7 @@ oidc_providers:
         - "email"
     user_mapping_provider:
       config:
-        localpart_template: "{{ user.preferred_username }}}"
+        localpart_template: "{{ user.preferred_username }}"
         display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
 ```
@@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr
 Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml.

 ```
-# Set to false to disable presence tracking on this homeserver.
+# Disable presence tracking, which is currently fairly resource intensive
+# More info: https://github.com/matrix-org/synapse/issues/9478
 use_presence: false

-# When this is enabled, the room "complexity" will be checked before a user
-# joins a new remote room. If it is above the complexity limit, the server will
-# disallow joining, or will instantly leave.
+# Set a small complexity limit, preventing users from joining large rooms
+# which may be resource-intensive to remain a part of.
+#
+# Note that this will not prevent users from joining smaller rooms that
+# eventually become complex.
 limit_remote_rooms:
-  # Uncomment to enable room complexity checking.
-  #enabled: true
+  enabled: true
   complexity: 3.0

 # Database configuration
 database:
+  # Use postgres for the best performance
   name: psycopg2
   args:
     user: matrix-synapse
-    # Generate a long, secure one with a password manager
+    # Generate a long, secure password using a password manager
     password: hunter2
     database: matrix-synapse
     host: localhost
     cp_min: 5
     cp_max: 10
 ```

 Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986). You can find join times and your most complex rooms like this:
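The query itself is not included in this excerpt. As a sketch (not the guide's original query, and assuming PostgreSQL with Synapse's `room_stats_current` table), the most complex rooms can be listed with something like:

```sql
-- A sketch: list rooms by the state-event count that the
-- complexity score is derived from.
SELECT room_id, current_state_events
FROM room_stats_current
ORDER BY current_state_events DESC
LIMIT 10;
```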
+8
-7
@@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the
 copy is complete, restart synapse. For instance:

 ```sh
-./synctl stop
+synctl stop
 cp homeserver.db homeserver.db.snapshot
-./synctl start
+synctl start
 ```

 Copy the old config file into a new config file:
@@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the
 PostgreSQL database configuration file `homeserver-postgres.yaml`:

 ```sh
-./synctl stop
+synctl stop
 mv homeserver.yaml homeserver-old-sqlite.yaml
 mv homeserver-postgres.yaml homeserver.yaml
-./synctl start
+synctl start
 ```

 Synapse should now be running against PostgreSQL.
@@ -234,12 +234,13 @@ host all all ::1/128 ident
 ### Fixing incorrect `COLLATE` or `CTYPE`

 Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
-different locales can cause issues if the locale library is updated from
+`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
+of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
+`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
 underneath the database, or if a different version of the locale is used on any
 replicas.

-The safest way to fix the issue is to dump the database and recreate it with
+If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
 the correct locale parameter (as shown above). It is also possible to change the
 parameters on a live database and run a `REINDEX` on the entire database,
 however extreme care must be taken to avoid database corruption.
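As a sketch, the current values can be inspected from `psql` before deciding on a fix (`synapse` here is a placeholder for your database name):

```sql
-- Show the COLLATE and CTYPE values of an existing database.
SELECT datname, datcollate, datctype
FROM pg_database
WHERE datname = 'synapse';
```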
@@ -182,7 +182,7 @@ matrix.example.com {

 ```
 frontend https
-  bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+  bind *:443,[::]:443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
   http-request set-header X-Forwarded-Proto https if { ssl_fc }
   http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
   http-request set-header X-Forwarded-For %[src]
@@ -195,7 +195,7 @@ frontend https
   use_backend matrix if matrix-host matrix-path

 frontend matrix-federation
-  bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+  bind *:8448,[::]:8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
   http-request set-header X-Forwarded-Proto https if { ssl_fc }
   http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
   http-request set-header X-Forwarded-For %[src]
+64
-3
@@ -539,6 +539,15 @@ templates:
 #
 #custom_template_directory: /path/to/custom/templates/

+# List of rooms to exclude from sync responses. This is useful for server
+# administrators wishing to group users into a room without these users being able
+# to see it from their client.
+#
+# By default, no room is excluded.
+#
+#exclude_rooms_from_sync:
+#  - !foo:example.com
+

 # Message retention policy at the server level.
 #
@@ -783,6 +792,12 @@ caches:
 # 'txn_limit' gives the maximum number of transactions to run per connection
 # before reconnecting. Defaults to 0, which means no limit.
 #
+# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to
+# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended)
+# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information
+# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here:
+# https://wiki.postgresql.org/wiki/Locale_data_changes
+#
 # 'args' gives options which are passed through to the database engine,
 # except for options starting 'cp_', which are used to configure the Twisted
 # connection pool. For a reference to valid arguments, see:
@@ -1212,10 +1227,18 @@ oembed:
 # Registration can be rate-limited using the parameters in the "Ratelimiting"
 # section of this file.

-# Enable registration for new users.
+# Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration,
+# you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
+# without any verification, you must also set `enable_registration_without_verification`, found below.
 #
 #enable_registration: false

+# Enable registration without email or captcha verification. Note: this option is *not* recommended,
+# as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
+# unless `enable_registration` is also enabled.
+#
+#enable_registration_without_verification: true
+
 # Time that a user's session remains valid for, after they log in.
 #
 # Note that this is not currently compatible with guest logins.
@@ -1947,8 +1970,14 @@ saml2_config:
 #
 #   localpart_template: Jinja2 template for the localpart of the MXID.
 #      If this is not set, the user will be prompted to choose their
-#      own username (see 'sso_auth_account_details.html' in the 'sso'
-#      section of this file).
+#      own username (see the documentation for the
+#      'sso_auth_account_details.html' template). This template can
+#      use the 'localpart_from_email' filter.
+#
+#   confirm_localpart: Whether to prompt the user to validate (or
+#      change) the generated localpart (see the documentation for the
+#      'sso_auth_account_details.html' template), instead of
+#      registering the account right away.
 #
 #   display_name_template: Jinja2 template for the display name to set
 #      on first login. If unset, no displayname will be set.
@@ -2729,3 +2758,35 @@ redis:
   # Optional password if configured on the Redis instance
   #
   #password: <secret_password>
+
+
+## Background Updates ##
+
+# Background updates are database updates that are run in the background in batches.
+# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+# sleep can all be configured. This is helpful to speed up or slow down the updates.
+#
+background_updates:
+  # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+  # a time to change the default.
+  #
+  #background_update_duration_ms: 500
+
+  # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+  #
+  #sleep_enabled: false
+
+  # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+  # and set a duration to change the default.
+  #
+  #sleep_duration_ms: 300
+
+  # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+  # set a size to change the default.
+  #
+  #min_batch_size: 10
+
+  # The batch size to use for the first iteration of a new background update. The default is 100.
+  # Uncomment and set a size to change the default.
+  #
+  #default_batch_size: 50
@@ -78,82 +78,3 @@ loggers:
 The above logging config will set Synapse as 'INFO' logging level by default,
 with the SQL layer at 'WARNING', and will log JSON formatted messages to a
 remote endpoint at 10.1.2.3:9999.
-
-## Upgrading from legacy structured logging configuration
-
-Versions of Synapse prior to v1.54.0 automatically converted the legacy
-structured logging configuration, which was deprecated in v1.23.0, to the standard
-library logging configuration.
-
-The following reference can be used to update your configuration. Based on the
-drain `type`, we can pick a new handler:
-
-1. For a type of `console`, `console_json`, or `console_json_terse`: a handler
-   with a class of `logging.StreamHandler` and a `stream` of `ext://sys.stdout`
-   or `ext://sys.stderr` should be used.
-2. For a type of `file` or `file_json`: a handler of `logging.FileHandler` with
-   a location of the file path should be used.
-3. For a type of `network_json_terse`: a handler of `synapse.logging.RemoteHandler`
-   with the host and port should be used.
-
-Then based on the drain `type` we can pick a new formatter:
-
-1. For a type of `console` or `file` no formatter is necessary.
-2. For a type of `console_json` or `file_json`: a formatter of
-   `synapse.logging.JsonFormatter` should be used.
-3. For a type of `console_json_terse` or `network_json_terse`: a formatter of
-   `synapse.logging.TerseJsonFormatter` should be used.
-
-For each new handler and formatter they should be added to the logging configuration
-and then assigned to either a logger or the root logger.
-
-An example legacy configuration:
-
-```yaml
-structured: true
-
-loggers:
-    synapse:
-        level: INFO
-    synapse.storage.SQL:
-        level: WARNING
-
-drains:
-    console:
-        type: console
-        location: stdout
-    file:
-        type: file_json
-        location: homeserver.log
-```
-
-Would be converted into a new configuration:
-
-```yaml
-version: 1
-
-formatters:
-    json:
-        class: synapse.logging.JsonFormatter
-
-handlers:
-    console:
-        class: logging.StreamHandler
-        stream: ext://sys.stdout
-    file:
-        class: logging.FileHandler
-        formatter: json
-        filename: homeserver.log
-
-loggers:
-    synapse:
-        level: INFO
-        handlers: [console, file]
-    synapse.storage.SQL:
-        level: WARNING
-```
-
-The new logging configuration is a bit more verbose, but significantly more
-flexible. It allows for configurations that were not previously possible, such as
-sending plain logs over the network, or using different handlers for different
-modules.
+12
-2
@@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver'

 Example: `message.sender_avatar_url|mxc_to_http(32,32)`

+```python
+localpart_from_email(address: str) -> str
+```
+
+Returns the local part of an email address (e.g. `alice` in `alice@example.com`).
+
+Example: `user.email_address|localpart_from_email`
+
 ## Email templates
@@ -176,8 +183,11 @@ Below are the templates Synapse will look for when generating pages related to S
   for the brand of the IdP
 * `user_attributes`: an object containing details about the user that
   we received from the IdP. May have the following attributes:
-  * display_name: the user's display_name
-  * emails: a list of email addresses
+  * `display_name`: the user's display name
+  * `emails`: a list of email addresses
+  * `localpart`: the local part of the Matrix user ID to register,
+    if `localpart_template` is set in the mapping provider configuration (empty
+    string if not)
 The template should render a form which submits the following fields:
 * `username`: the localpart of the user's chosen user id
 * `sso_new_user_consent.html`: HTML page allowing the user to consent to the
+3
-2
@@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse:

 * If you use synctl:
   ```sh
-  cd /where/you/run/synapse
-  ./synctl restart
+  # Depending on how Synapse is installed, synctl may already be on
+  # your PATH. If not, you may need to activate a virtual environment.
+  synctl restart
   ```
 * If you use systemd:
   ```sh
+115
-8
@@ -47,7 +47,7 @@ this document.
 3. Restart Synapse:

    ```bash
-   ./synctl restart
+   synctl restart
    ```

 To check whether your update was successful, you can check the running
@@ -85,6 +85,113 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.57.0
+
+## Changes to database schema for application services
+
+Synapse v1.57.0 includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the
+way transaction IDs are managed for application services. If your deployment uses a dedicated
+worker for application service traffic, **it must be stopped** when the database is upgraded
+(which normally happens when the main process is upgraded), to ensure the change is made safely
+without any risk of reusing transaction IDs.
+
+Deployments which do not use separate worker processes can be upgraded as normal. Similarly,
+deployments where no application services are in use can be upgraded as normal.
+
+<details>
+<summary><b>Recovering from an incorrect upgrade</b></summary>
+
+If the database schema is upgraded *without* stopping the worker responsible
+for AS traffic, then the following error may be given when attempting to start
+a Synapse worker or master process:
+
+```
+**********************************************************************************
+ Error during initialisation:
+
+ Postgres sequence 'application_services_txn_id_seq' is inconsistent with associated
+ table 'application_services_txns'. This can happen if Synapse has been downgraded and
+ then upgraded again, or due to a bad migration.
+
+ To fix this error, shut down Synapse (including any and all workers)
+ and run the following SQL:
+
+     SELECT setval('application_services_txn_id_seq', (
+         SELECT GREATEST(MAX(txn_id), 0) FROM application_services_txns
+     ));
+
+ See docs/postgres.md for more information.
+
+ There may be more information in the logs.
+**********************************************************************************
+```
+
+This error may also be seen if Synapse is *downgraded* to an earlier version,
+and then upgraded again to v1.57.0 or later.
+
+In either case:
+
+1. Ensure that the worker responsible for AS traffic is stopped.
+2. Run the SQL command given in the error message via `psql`.
+
+Synapse should then start correctly.
+</details>
+
+# Upgrading to v1.56.0
+
+## Open registration without verification is now disabled by default
+
+Synapse will refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config
+flag `enable_registration_without_verification` is set to "true".
+## Groups/communities feature has been deprecated
+
+The non-standard groups/communities feature in Synapse has been deprecated and will
+be disabled by default in Synapse v1.58.0.
+
+You can test disabling it by adding the following to your homeserver configuration:
+
+```yaml
+experimental_features:
+  groups_enabled: false
+```
+
+## Change in behaviour for PostgreSQL databases with unsafe locale
+
+Synapse now refuses to start when using PostgreSQL with non-`C` values for `COLLATE` and
+`CTYPE` unless the config flag `allow_unsafe_locale`, found in the database section of
+the configuration file, is set to `true`. See the [PostgreSQL documentation](https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype)
+for more information and instructions on how to fix a database with incorrect values.
+
+# Upgrading to v1.55.0
+
+## `synctl` script has been moved
+
+The `synctl` script
+[has been made](https://github.com/matrix-org/synapse/pull/12140) an
+[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
+and no longer exists at the root of Synapse's source tree. If you wish to use
+`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
+`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
+
+You will need to ensure `synctl` is on your `PATH`.
+  - This is automatically the case when using
+    [Debian packages](https://packages.matrix.org/debian/) or
+    [docker images](https://hub.docker.com/r/matrixdotorg/synapse)
+    provided by Matrix.org.
+  - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
+    to your Python installation's `bin`. This should be on your `PATH`
+    automatically, though you might need to activate a virtual environment
+    depending on how you installed Synapse.
+
+
+## Compatibility dropped for Mjolnir 1.3.1 and earlier
+
+Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier.
+If you use the Mjolnir module to moderate your homeserver,
+please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
+
+
 # Upgrading to v1.54.0

 ## Legacy structured logging configuration removal
@@ -92,7 +199,7 @@ process, for example:
 This release removes support for the `structured: true` logging configuration
 which was deprecated in Synapse v1.23.0. If your logging configuration contains
 `structured: true` then it should be modified based on the
-[structured logging documentation](structured_logging.md).
+[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).

 # Upgrading to v1.53.0
@@ -108,8 +215,8 @@ the `/_matrix/client/` path.

 ## Stabilisation of MSC3231

-The unstable validity-check endpoint for the
-[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
+The unstable validity-check endpoint for the
+[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
 feature has been stabilised and moved from:

 `/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity`
@@ -123,9 +230,9 @@ Please update any relevant reverse proxy or firewall configurations appropriatel

 ## Time-based cache expiry is now enabled by default

 Formerly, entries in the cache were not evicted regardless of whether they were accessed after storing.
-This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed.
-To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and
-`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config.
+This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed.
+To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and
+`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config.
 The `expiry_time` flag will still continue to work, but it has been deprecated and will be removed in the future.

 ## Deprecation of `capability` `org.matrix.msc3283.*`
@@ -709,7 +816,7 @@ lock down external access to the Admin API endpoints.
 This release deprecates use of the `structured: true` logging
 configuration for structured logging. If your logging configuration
 contains `structured: true` then it should be modified based on the
-[structured logging documentation](structured_logging.md).
+[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).

 The `structured` and `drains` logging options are now deprecated and
 should be replaced by standard logging configuration of `handlers` and
@@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```

A new server admin user can also be created using the `register_new_matrix_user`
command. This is a script that is located in the `scripts/` directory, or possibly
command. This is a script that is distributed as part of Synapse. It is possibly
already on your `$PATH` depending on how Synapse was installed.

Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
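
`register_new_matrix_user` authenticates against the homeserver's registration
shared secret, so the homeserver config needs a line of this shape (the value
is illustrative and should be a private random string):

```yaml
registration_shared_secret: "a-long-random-string-kept-private"
```
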
+42 -32
@@ -27,7 +27,7 @@ feeds streams of newly written data between processes so they can be kept in
sync with the database state.

When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
[Redis pub/sub channel](https://redis.io/docs/manual/pubsub/) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
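
For context, turning on the Redis-based replication described here is a small
block in the shared configuration; the host and port below are illustrative:

```yaml
redis:
  enabled: true
  host: localhost
  port: 6379
```
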
@@ -185,8 +185,8 @@ worker: refer to the [stream writers](#stream-writers) section below for further
information.

    # Sync requests
    ^/_matrix/client/(v2_alpha|r0|v3)/sync$
    ^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$
    ^/_matrix/client/(r0|v3)/sync$
    ^/_matrix/client/(api/v1|r0|v3)/events$
    ^/_matrix/client/(api/v1|r0|v3)/initialSync$
    ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$

@@ -200,13 +200,9 @@ information.

    ^/_matrix/federation/v1/query/
    ^/_matrix/federation/v1/make_join/
    ^/_matrix/federation/v1/make_leave/
    ^/_matrix/federation/v1/send_join/
    ^/_matrix/federation/v2/send_join/
    ^/_matrix/federation/v1/send_leave/
    ^/_matrix/federation/v2/send_leave/
    ^/_matrix/federation/v1/invite/
    ^/_matrix/federation/v2/invite/
    ^/_matrix/federation/v1/query_auth/
    ^/_matrix/federation/(v1|v2)/send_join/
    ^/_matrix/federation/(v1|v2)/send_leave/
    ^/_matrix/federation/(v1|v2)/invite/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/user/devices/

@@ -274,6 +270,8 @@ information.

Additionally, the following REST endpoints can be handled for GET requests:

    ^/_matrix/federation/v1/groups/
    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
    ^/_matrix/client/(r0|v3|unstable)/groups/

Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
@@ -351,8 +349,11 @@ is only supported with Redis-based replication.)

To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
can handle multiple streams. For example, to move event persistence off to a
dedicated worker, the shared configuration would include:
can handle multiple streams, but unless otherwise documented, each stream can only
have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
configuration would include:

```yaml
instance_map:
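  # The diff is truncated at this point. As a hedged sketch (the worker name,
  # host and port are illustrative, not taken from this changeset), the map
  # continues along these lines:
  event_persister1:
    host: localhost
    port: 8034
```
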
@@ -370,8 +371,8 @@ streams and the endpoints associated with them:

##### The `events` stream

The `events` stream also experimentally supports having multiple writers, where
work is sharded between them by room ID. Note that you *must* restart all worker
The `events` stream experimentally supports having multiple writers, where work
is sharded between them by room ID. Note that you *must* restart all worker
instances when adding or removing event persisters. An example `stream_writers`
configuration with multiple writers:
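
The example itself falls outside the diff context shown above; as a hedged
sketch of the shape this section describes (worker names illustrative), a
multi-writer configuration looks like:

```yaml
stream_writers:
  events:
    - event_persister1
    - event_persister2
```
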
@@ -384,38 +385,38 @@ stream_writers:

##### The `typing` stream

The following endpoints should be routed directly to the workers configured as
stream writers for the `typing` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `typing` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing

##### The `to_device` stream

The following endpoints should be routed directly to the workers configured as
stream writers for the `to_device` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `to_device` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/
    ^/_matrix/client/(r0|v3|unstable)/sendToDevice/

##### The `account_data` stream

The following endpoints should be routed directly to the workers configured as
stream writers for the `account_data` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `account_data` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags
    ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data
    ^/_matrix/client/(r0|v3|unstable)/.*/tags
    ^/_matrix/client/(r0|v3|unstable)/.*/account_data

##### The `receipts` stream

The following endpoints should be routed directly to the workers configured as
stream writers for the `receipts` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `receipts` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers
    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers

##### The `presence` stream

The following endpoints should be routed directly to the workers configured as
stream writers for the `presence` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `presence` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

@@ -525,19 +526,28 @@ Note that if a reverse proxy is used, then `/_matrix/media/` must be routed for

Handles searches in the user directory. It can handle REST endpoints matching
the following regular expressions:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$
    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

When using this worker you must also set `update_user_directory: False` in the
When using this worker you must also set `update_user_directory: false` in the
shared configuration file to stop the main synapse running background
jobs related to updating the user directory.

The above endpoint is not *required* to be routed to this worker. By default,
`update_user_directory` is set to `true`, which means the main process
will handle updates. All workers configured with `client` can handle the above
endpoint as long as either this worker or the main process is configured to
handle it, and is online.

If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.
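
A minimal sketch of the two pieces involved, assuming a worker named
`user_dir1` running the `synapse.app.user_dir` application this section
documents (names illustrative):

```yaml
# In the shared homeserver.yaml:
update_user_directory: false

# In the worker's own config file:
worker_app: synapse.app.user_dir
worker_name: user_dir1
```
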
### `synapse.app.frontend_proxy`

Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload
    ^/_matrix/client/(r0|v3|unstable)/keys/upload

If `use_presence` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions:

@@ -11,7 +11,8 @@ local_partial_types = True
no_implicit_optional = True

files =
  scripts-dev/sign_json,
  docker/,
  scripts-dev/,
  setup.py,
  synapse/,
  tests/
@@ -23,19 +24,16 @@ files =
# https://docs.python.org/3/library/re.html#re.X
exclude = (?x)
  ^(
  |scripts-dev/build_debian_packages.py
  |scripts-dev/federation_client.py
  |scripts-dev/release.py

  |synapse/storage/databases/__init__.py
  |synapse/storage/databases/main/__init__.py
  |synapse/storage/databases/main/cache.py
  |synapse/storage/databases/main/devices.py
  |synapse/storage/databases/main/event_federation.py
  |synapse/storage/databases/main/group_server.py
  |synapse/storage/databases/main/metrics.py
  |synapse/storage/databases/main/monthly_active_users.py
  |synapse/storage/databases/main/push_rule.py
  |synapse/storage/databases/main/receipts.py
  |synapse/storage/databases/main/roommember.py
  |synapse/storage/databases/main/search.py
  |synapse/storage/databases/main/state.py
  |synapse/storage/schema/

  |tests/api/test_auth.py
@@ -52,14 +50,6 @@ exclude = (?x)
  |tests/federation/test_federation_server.py
  |tests/federation/transport/test_knocking.py
  |tests/federation/transport/test_server.py
  |tests/handlers/test_cas.py
  |tests/handlers/test_directory.py
  |tests/handlers/test_e2e_keys.py
  |tests/handlers/test_federation.py
  |tests/handlers/test_oidc.py
  |tests/handlers/test_presence.py
  |tests/handlers/test_profile.py
  |tests/handlers/test_saml.py
  |tests/handlers/test_typing.py
  |tests/http/federation/test_matrix_federation_agent.py
  |tests/http/federation/test_srv_resolver.py
@@ -71,37 +61,20 @@ exclude = (?x)
  |tests/logging/test_terse_json.py
  |tests/module_api/test_api.py
  |tests/push/test_email.py
  |tests/push/test_http.py
  |tests/push/test_presentable_names.py
  |tests/push/test_push_rule_evaluator.py
  |tests/rest/client/test_account.py
  |tests/rest/client/test_filter.py
  |tests/rest/client/test_report_event.py
  |tests/rest/client/test_rooms.py
  |tests/rest/client/test_third_party_rules.py
  |tests/rest/client/test_transactions.py
  |tests/rest/client/test_typing.py
  |tests/rest/key/v2/test_remote_key_resource.py
  |tests/rest/media/v1/test_base.py
  |tests/rest/media/v1/test_media_storage.py
  |tests/rest/media/v1/test_url_preview.py
  |tests/scripts/test_new_matrix_user.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/state/test_v2.py
  |tests/storage/test_background_update.py
  |tests/storage/test_base.py
  |tests/storage/test_client_ips.py
  |tests/storage/test_database.py
  |tests/storage/test_event_federation.py
  |tests/storage/test_id_generators.py
  |tests/storage/test_roommember.py
  |tests/test_metrics.py
  |tests/test_phone_home.py
  |tests/test_server.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/unittest.py
  |tests/util/caches/test_cached_call.py
  |tests/util/caches/test_deferred_cache.py
  |tests/util/caches/test_descriptors.py
@@ -120,6 +93,9 @@ exclude = (?x)
  |tests/utils.py
  )$

[mypy-synapse._scripts.*]
disallow_untyped_defs = True

[mypy-synapse.api.*]
disallow_untyped_defs = True

@@ -129,7 +105,7 @@ disallow_untyped_defs = True
[mypy-synapse.appservice.*]
disallow_untyped_defs = True

[mypy-synapse.config._base]
[mypy-synapse.config.*]
disallow_untyped_defs = True

[mypy-synapse.crypto.*]
@@ -246,10 +222,7 @@ disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.rest.admin.*]
disallow_untyped_defs = True

[mypy-tests.rest.client.*]
[mypy-tests.rest.*]
disallow_untyped_defs = True

[mypy-tests.federation.transport.test_client]
@@ -291,6 +264,9 @@ ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True

[mypy-importlib_metadata.*]
ignore_missing_imports = True

[mypy-jaeger_client.*]
ignore_missing_imports = True

@@ -350,3 +326,6 @@ ignore_missing_imports = True

[mypy-zope]
ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

Generated +2835 (file diff suppressed because it is too large)

+234 -18
@@ -36,24 +36,9 @@

[tool.black]
target-version = ['py37', 'py38', 'py39', 'py310']
exclude = '''

(
  /(
    \.eggs        # exclude a few common directories in the
    | \.git       # root of the project
    | \.tox
    | \.venv
    | \.env
    | env
    | _build
    | _trial_temp.*
    | build
    | dist
    | debian
  )/
)
'''
# black ignores everything in .gitignore by default, see
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
# Use `extend-exclude` if you want to exclude something in addition to this.

[tool.isort]
line_length = 88
@@ -65,4 +50,235 @@ known_twisted = ["twisted", "OpenSSL"]
multi_line_output = 3
include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
version = "1.57.1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
readme = "README.rst"
repository = "https://github.com/matrix-org/synapse"
packages = [
    { include = "synapse" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Topic :: Communications :: Chat",
]
include = [
    { path = "AUTHORS.rst", format = "sdist" },
    { path = "book.toml", format = "sdist" },
    { path = "changelog.d", format = "sdist" },
    { path = "CHANGES.md", format = "sdist" },
    { path = "CONTRIBUTING.md", format = "sdist" },
    { path = "demo", format = "sdist" },
    { path = "docs", format = "sdist" },
    { path = "INSTALL.md", format = "sdist" },
    { path = "mypy.ini", format = "sdist" },
    { path = "scripts-dev", format = "sdist" },
    { path = "synmark", format = "sdist" },
    { path = "sytest-blacklist", format = "sdist" },
    { path = "tests", format = "sdist" },
    { path = "UPGRADE.rst", format = "sdist" },
]

[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"

export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7"

# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
frozendict = ">=1,!=2.1.2"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
canonicaljson = ">=1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = ">=1.1.0"
PyNaCl = ">=1.2.1"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.0"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
prometheus-client = ">=0.4.0"
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see #9936
attrs = ">=19.2.0,!=21.1.0"
netaddr = ">=0.7.18"
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
typing-extensions = ">=3.10.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "~=1.1.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version of `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }


# Optional Dependencies
# ---------------------
matrix-synapse-ldap3 = { version = ">=0.1", optional = true }
psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true }
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.14.0", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
pyjwt = { version = ">=1.6.4", optional = true }
txredisapi = { version = ">=1.4.7", optional = true }
hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }

[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
# twice: once here, and once in the `all` extra.
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
saml2 = ["pysaml2"]
oidc = ["authlib"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url_preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["pyjwt"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache_memory = ["pympler"]
test = ["parameterized", "idna"]

# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
# 1) for new installations, I want instructions in existing documentation and tutorials
#    out there to still work.
# 2) I don't want to hard-code a list of extras into CI if I can help it. The ideal
#    solution here would be something like https://github.com/python-poetry/poetry/issues/3413
#    Poetry 1.2's dependency groups might make this easier. But I'm not trying that out
#    until there's a stable release of 1.2.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
    # matrix-synapse-ldap3
    "matrix-synapse-ldap3",
    # postgres
    "psycopg2", "psycopg2cffi", "psycopg2cffi-compat",
    # saml2
    "pysaml2",
    # oidc
    "authlib",
    # url_preview
    "lxml",
    # sentry
    "sentry-sdk",
    # opentracing
    "jaeger-client", "opentracing",
    # jwt
    "pyjwt",
    # redis
    "txredisapi", "hiredis"
    # omitted:
    #   - cache_memory: this is an experimental option
    #   - test: it's useful to have this separate from dev deps in the olddeps job
    #   - systemd: this is a system-based requirement
]

[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
isort = "==5.7.0"
black = "==22.3.0"
flake8-comprehensions = "*"
flake8-bugbear = "==21.3.2"
flake8 = "*"

# Typechecking
mypy = "==0.931"
mypy-zope = "==0.3.5"
types-bleach = ">=4.1.0"
types-jsonschema = ">=3.2.0"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
types-pyOpenSSL = ">=20.0.7"
types-PyYAML = ">=5.4.10"
types-requests = ">=2.26.0"
types-setuptools = ">=57.4.0"

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
# parameterized<0.7.4 can create classes with names that would normally be invalid
# identifiers. trial really does not like this when running with multiple workers.
parameterized = ">=0.7.4"
idna = ">=2.5"

# The following are used by the release script
click = "==8.1.0"
redbaron = "==0.9.2"
GitPython = "==3.1.14"
commonmark = "==0.9.1"
pygithub = "==1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"

[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

@@ -19,7 +19,7 @@ if ! git diff --quiet FETCH_HEAD... -- debian; then
    if git diff --quiet FETCH_HEAD... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
        echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
        echo "https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#debian-changelog" >&2
        exit 1
    fi
fi
@@ -32,7 +32,7 @@ fi

# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"
https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog"

# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)
@@ -1,72 +0,0 @@
import argparse
import json
import logging
import sys

import dns.resolver
import urllib2
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64


def get_targets(server_name):
    if ":" in server_name:
        target, port = server_name.split(":")
        yield (target, int(port))
        return
    try:
        answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
        for srv in answers:
            yield (srv.target, srv.port)
    except dns.resolver.NXDOMAIN:
        yield (server_name, 8448)


def get_server_keys(server_name, target, port):
    url = "https://%s:%i/_matrix/key/v1" % (target, port)
    keys = json.load(urllib2.urlopen(url))
    verify_keys = {}
    for key_id, key_base64 in keys["verify_keys"].items():
        verify_key = decode_verify_key_bytes(key_id, decode_base64(key_base64))
        verify_signed_json(keys, server_name, verify_key)
        verify_keys[key_id] = verify_key
    return verify_keys


def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("signature_name")
    parser.add_argument(
        "input_json", nargs="?", type=argparse.FileType("r"), default=sys.stdin
    )

    args = parser.parse_args()
    logging.basicConfig()

    server_name = args.signature_name
    keys = {}
    for target, port in get_targets(server_name):
        try:
            keys = get_server_keys(server_name, target, port)
            print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
            write_signing_keys(sys.stdout, keys.values())
            break
        except Exception:
            logging.exception("Error talking to %s:%s", target, port)

    json_to_check = json.load(args.input_json)
    print("Checking JSON:")
    for key_id in json_to_check["signatures"][args.signature_name]:
        try:
            key = keys[key_id]
            verify_signed_json(json_to_check, args.signature_name, key)
            print("PASS %s" % (key_id,))
        except Exception:
            logging.exception("Check for key %s failed" % (key_id,))
            print("FAIL %s" % (key_id,))


if __name__ == "__main__":
    main()
@@ -50,25 +50,18 @@ if [[ -n "$WORKERS" ]]; then

    export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
    COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile

    # And provide some more configuration to complement.
    export COMPLEMENT_CA=true
    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=25
    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
else
    export COMPLEMENT_BASE_IMAGE=complement-synapse
    COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
    COMPLEMENT_DOCKERFILE=Dockerfile
fi

# Build the Complement image from the Synapse image we just built.
docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"

cd "$COMPLEMENT_DIR"

EXTRA_COMPLEMENT_ARGS=""
if [[ -n "$1" ]]; then
    # A test name regex has been set, supply it to Complement
    EXTRA_COMPLEMENT_ARGS+="-run $1 "
fi
docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement"

# Run the tests!
echo "Images built; running complement"
go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
cd "$COMPLEMENT_DIR"
go test -v -tags synapse_blacklist,msc2716,msc3030 -count=1 "$@" ./tests/...

@@ -1,208 +0,0 @@
#! /usr/bin/python

import argparse
import ast
import os
import re
import sys

import yaml


class DefinitionVisitor(ast.NodeVisitor):
    def __init__(self):
        super().__init__()
        self.functions = {}
        self.classes = {}
        self.names = {}
        self.attrs = set()
        self.definitions = {
            "def": self.functions,
            "class": self.classes,
            "names": self.names,
            "attrs": self.attrs,
        }

    def visit_Name(self, node):
        self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)

    def visit_Attribute(self, node):
        self.attrs.add(node.attr)
        for child in ast.iter_child_nodes(node):
            self.visit(child)

    def visit_ClassDef(self, node):
        visitor = DefinitionVisitor()
        self.classes[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)

    def visit_FunctionDef(self, node):
        visitor = DefinitionVisitor()
        self.functions[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)


def non_empty(defs):
    functions = {name: non_empty(f) for name, f in defs["def"].items()}
    classes = {name: non_empty(f) for name, f in defs["class"].items()}
    result = {}
    if functions:
        result["def"] = functions
    if classes:
        result["class"] = classes
    names = defs["names"]
    uses = []
    for name in names.get("Load", ()):
        if name not in names.get("Param", ()) and name not in names.get("Store", ()):
            uses.append(name)
    uses.extend(defs["attrs"])
    if uses:
        result["uses"] = uses
    result["names"] = names
    result["attrs"] = defs["attrs"]
    return result


def definitions_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = DefinitionVisitor()
    visitor.visit(input_ast)
    definitions = non_empty(visitor.definitions)
    return definitions


def definitions_in_file(filepath):
    with open(filepath) as f:
        return definitions_in_code(f.read())


def defined_names(prefix, defs, names):
    for name, funcs in defs.get("def", {}).items():
        names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)

    for name, funcs in defs.get("class", {}).items():
        names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)


def used_names(prefix, item, defs, names):
    for name, funcs in defs.get("def", {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    for name, funcs in defs.get("class", {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    path = prefix.rstrip(".")
    for used in defs.get("uses", ()):
        if used in names:
            if item:
                names[item].setdefault("uses", []).append(used)
            names[used].setdefault("used", {}).setdefault(item, []).append(path)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Find definitions.")
    parser.add_argument(
        "--unused", action="store_true", help="Only list unused definitions"
    )
    parser.add_argument(
        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
    )
    parser.add_argument(
        "--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
    )
    parser.add_argument(
        "directories",
        nargs="+",
        metavar="DIR",
        help="Directories to search for definitions",
    )
    parser.add_argument(
        "--referrers",
        default=0,
        type=int,
        help="Include referrers up to the given depth",
    )
    parser.add_argument(
        "--referred",
        default=0,
        type=int,
        help="Include referred down to the given depth",
    )
    parser.add_argument(
        "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
    )
    args = parser.parse_args()

    definitions = {}
    for directory in args.directories:
        for root, _, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)
                    definitions[filepath] = definitions_in_file(filepath)

    names = {}
    for filepath, defs in definitions.items():
        defined_names(filepath + ":", defs, names)

    for filepath, defs in definitions.items():
        used_names(filepath + ":", None, defs, names)

    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
    ignore = [re.compile(pattern) for pattern in args.ignore or ()]

    result = {}
    for name, definition in names.items():
        if patterns and not any(pattern.match(name) for pattern in patterns):
            continue
        if ignore and any(pattern.match(name) for pattern in ignore):
            continue
        if args.unused and definition.get("used"):
            continue
        result[name] = definition

    referrer_depth = args.referrers
    referrers = set()
    while referrer_depth:
        referrer_depth -= 1
        for entry in result.values():
            for used_by in entry.get("used", ()):
                referrers.add(used_by)
        for name, definition in names.items():
            if name not in referrers:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    referred_depth = args.referred
    referred = set()
    while referred_depth:
        referred_depth -= 1
        for entry in result.values():
            for uses in entry.get("uses", ()):
                referred.add(uses)
        for name, definition in names.items():
            if name not in referred:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == "yaml":
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == "dot":
        print("digraph {")
        for name, entry in result.items():
            print(name)
            for used_by in entry.get("used", ()):
                if used_by in result:
                    print(used_by, "->", name)
        print("}")
    else:
        raise ValueError("Unknown format %r" % (args.format))
@@ -1,28 +0,0 @@
#!/usr/bin/env bash
#
# Update/check the docs/sample_config.yaml

set -e

cd "$(dirname "$0")/.."

SAMPLE_CONFIG="docs/sample_config.yaml"
SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"

check() {
    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1
}

if [ "$1" == "--check" ]; then
    diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
        exit 1
    }
    diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || {
        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
        exit 1
    }
else
    ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
    ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG"
fi
Executable +28

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
#
# Update/check the docs/sample_config.yaml

set -e

cd "$(dirname "$0")/.."

SAMPLE_CONFIG="docs/sample_config.yaml"
SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"

check() {
    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1
}

if [ "$1" == "--check" ]; then
    diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || {
        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
        exit 1
    }
    diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || {
        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
        exit 1
    }
else
    synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
    synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG"
fi
@@ -1,81 +0,0 @@
import sqlite3
import sys

from unpaddedbase64 import decode_base64, encode_base64

from synapse.crypto.event_signing import (
    add_event_pdu_content_hash,
    compute_pdu_event_reference_hash,
)
from synapse.federation.units import Pdu
from synapse.storage._base import SQLBaseStore
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore


class Store:
    _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
    _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
    _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
    _get_pdu_origin_signatures_txn = SignatureStore.__dict__[
        "_get_pdu_origin_signatures_txn"
    ]
    _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
    _store_pdu_reference_hash_txn = SignatureStore.__dict__[
        "_store_pdu_reference_hash_txn"
    ]
    _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
    simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]


store = Store()


def select_pdus(cursor):
    cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")

    ids = cursor.fetchall()

    pdu_tuples = store._get_pdu_tuples(cursor, ids)

    pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]

    reference_hashes = {}

    for pdu in pdus:
        try:
            if pdu.prev_pdus:
                print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
                for pdu_id, origin, hashes in pdu.prev_pdus:
                    ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
                    hashes[ref_alg] = encode_base64(ref_hsh)
                    store._store_prev_pdu_hash_txn(
                        cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
                    )
                print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
            pdu = add_event_pdu_content_hash(pdu)
            ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
            reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
            store._store_pdu_reference_hash_txn(
                cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
            )

            for alg, hsh_base64 in pdu.hashes.items():
                print(alg, hsh_base64)
                store._store_pdu_content_hash_txn(
                    cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
                )

        except Exception:
            print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)


def main():
    conn = sqlite3.connect(sys.argv[1])
    cursor = conn.cursor()
    select_pdus(cursor)
    conn.commit()


if __name__ == "__main__":
    main()
+1 -16

@@ -80,22 +80,7 @@ else
    # then lint everything!
    if [[ -z ${files+x} ]]; then
        # Lint all source code files and directories
        # Note: this list aims to mirror the one in tox.ini
        files=(
            "synapse" "docker" "tests"
            # annoyingly, black doesn't find these so we have to list them
            "scripts/export_signing_key"
            "scripts/generate_config"
            "scripts/generate_log_config"
            "scripts/hash_password"
            "scripts/register_new_matrix_user"
            "scripts/synapse_port_db"
            "scripts/update_synapse_database"
            "scripts-dev"
            "scripts-dev/build_debian_packages"
            "scripts-dev/sign_json"
            "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
        )
        files=( "." )
    fi
fi

@@ -1,60 +0,0 @@
#! /usr/bin/python

import argparse
import ast
import os
import sys

import yaml

PATTERNS_V1 = []
PATTERNS_V2 = []

RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}


class CallVisitor(ast.NodeVisitor):
    def visit_Call(self, node):
        if isinstance(node.func, ast.Name):
            name = node.func.id
        else:
            return

        if name == "client_patterns":
            PATTERNS_V2.append(node.args[0].s)


def find_patterns_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = CallVisitor()
    visitor.visit(input_ast)


def find_patterns_in_file(filepath):
    with open(filepath) as f:
        find_patterns_in_code(f.read())


parser = argparse.ArgumentParser(description="Find url patterns.")

parser.add_argument(
    "directories",
    nargs="+",
    metavar="DIR",
    help="Directories to search for definitions",
)

args = parser.parse_args()


for directory in args.directories:
    for root, _, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".py"):
                filepath = os.path.join(root, filename)
                find_patterns_in_file(filepath)

PATTERNS_V1.sort()
PATTERNS_V2.sort()

yaml.dump(RESULT, sys.stdout, default_flow_style=False)
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG"
synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG"

# Create the PostgreSQL database.
echo "Creating postgres database..."
@@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
if [ -z "$COVERAGE" ]; then
    # No coverage needed
    scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
    synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
else
    # Coverage desired
    coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
    coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
fi

# Delete schema_version, applied_schema_deltas and applied_module_schemas tables

+68 -3

@@ -17,6 +17,8 @@
"""An interactive script for doing a release. See `cli()` below.
"""

import glob
import os
import re
import subprocess
import sys
@@ -64,11 +66,15 @@ def cli():

    ./scripts-dev/release.py tag

    # ... wait for asssets to build ...
    # ... wait for assets to build ...

    ./scripts-dev/release.py publish
    ./scripts-dev/release.py upload

    # Optional: generate some nice links for the announcement

    ./scripts-dev/release.py upload

If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published.
"""
@@ -209,8 +215,8 @@ def prepare():
    with open("synapse/__init__.py", "w") as f:
        f.write(parsed_synapse_ast.dumps())

    # Generate changelogs
    run_until_successful("python3 -m towncrier", shell=True)
    # Generate changelogs.
    generate_and_write_changelog(current_version)

    # Generate debian changelogs
    if parsed_new_version.pre is not None:
@@ -413,6 +419,41 @@ def upload():
    )


@cli.command()
def announce():
    """Generate markdown to announce the release."""

    current_version, _, _ = parse_version_from_module()
    tag_name = f"v{current_version}"

    click.echo(
        f"""
Hi everyone. Synapse {current_version} has just been released.

[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
    )

    if "rc" in tag_name:
        click.echo(
            """
Announce the RC in
- #homeowners:matrix.org (Synapse Announcements)
- #synapse-dev:matrix.org"""
        )
    else:
        click.echo(
            """
Announce the release in
- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
- #synapse-dev:matrix.org
- #synapse-package-maintainers:matrix.org"""
        )


def parse_version_from_module() -> Tuple[
    version.Version, redbaron.RedBaron, redbaron.Node
]:
@@ -523,5 +564,29 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
    return "\n".join(version_changelog)


def generate_and_write_changelog(current_version: version.Version):
    # We do this by getting a draft so that we can edit it before writing to the
    # changelog.
    result = run_until_successful(
        "python3 -m towncrier --draft", shell=True, capture_output=True
    )
    new_changes = result.stdout.decode("utf-8")
    new_changes = new_changes.replace(
        "No significant changes.", f"No significant changes since {current_version}."
    )

    # Prepend changes to changelog
    with open("CHANGES.md", "r+") as f:
        existing_content = f.read()
        f.seek(0, 0)
        f.write(new_changes)
        f.write("\n")
        f.write(existing_content)

    # Remove all the news fragments
    for f in glob.iglob("changelog.d/*.*"):
        os.remove(f)


if __name__ == "__main__":
    cli()

@@ -1,67 +0,0 @@
import collections
import json
import sys
import time

import requests

Entry = collections.namedtuple("Entry", "name position rows")

ROW_TYPES = {}


def row_type_for_columns(name, column_names):
    column_names = tuple(column_names)
    row_type = ROW_TYPES.get((name, column_names))
    if row_type is None:
        row_type = collections.namedtuple(name, column_names)
        ROW_TYPES[(name, column_names)] = row_type
    return row_type


def parse_response(content):
    streams = json.loads(content)
    result = {}
    for name, value in streams.items():
        row_type = row_type_for_columns(name, value["field_names"])
        position = value["position"]
        rows = [row_type(*row) for row in value["rows"]]
        result[name] = Entry(name, position, rows)
    return result


def replicate(server, streams):
    return parse_response(
        requests.get(
            server + "/_synapse/replication", verify=False, params=streams
        ).content
    )


def main():
    server = sys.argv[1]

    streams = None
    while not streams:
        try:
            streams = {
                row.name: row.position
                for row in replicate(server, {"streams": "-1"})["streams"].rows
            }
        except requests.exceptions.ConnectionError:
            time.sleep(0.1)

    while True:
        try:
            results = replicate(server, streams)
        except Exception:
            sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
            break
        for update in results.values():
            for row in update.rows:
                sys.stdout.write(repr(row) + "\n")
            streams[update.name] = update.position


if __name__ == "__main__":
    main()
@@ -1,19 +0,0 @@
#!/usr/bin/env bash

# This script builds the Docker image to run the PostgreSQL tests, and then runs
# the tests. It uses a dedicated tox environment so that we don't have to
# rebuild it each time.

# Command line arguments to this script are forwarded to "tox" and then to "trial".

set -e

# Build, and tag
docker build docker/ \
    --build-arg "UID=$(id -u)" \
    --build-arg "GID=$(id -g)" \
    -f docker/Dockerfile-pgtests \
    -t synapsepgtests

# Run, mounting the current directory into /src
docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
@@ -1,19 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse._scripts.review_recent_signups import main

if __name__ == "__main__":
    main()
@@ -1,45 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;

use JSON::XS;
use LWP::UserAgent;
use URI::Escape;

if (@ARGV < 4) {
    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
}

my ($hs, $access_token, $room_id, $group_id) = @ARGV;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);

if ($room_id =~ /^#/) {
    $room_id = uri_escape($room_id);
    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
}

my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
my $group_users = [
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
];

die "refusing to sync from empty room" unless (@$room_users);
die "refusing to sync to empty group" unless (@$group_users);

my $diff = {};
foreach my $user (@$room_users) { $diff->{$user}++ }
foreach my $user (@$group_users) { $diff->{$user}-- }

foreach my $user (keys %$diff) {
    if ($diff->{$user} == 1) {
        warn "inviting $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
    elsif ($diff->{$user} == -1) {
        warn "removing $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
}
@@ -15,7 +15,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from typing import Any, Dict

@@ -96,7 +95,7 @@ CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
# We pin black so that our tests don't start failing on new releases.
CONDITIONAL_REQUIREMENTS["lint"] = [
    "isort==5.7.0",
    "black==21.12b0",
    "black==22.3.0",
    "flake8-comprehensions",
    "flake8-bugbear==21.3.2",
    "flake8",
@@ -109,6 +108,7 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
    "types-jsonschema>=3.2.0",
    "types-opentracing>=2.4.2",
    "types-Pillow>=8.3.4",
    "types-psycopg2>=2.9.9",
    "types-pyOpenSSL>=20.0.7",
    "types-PyYAML>=5.4.10",
    "types-requests>=2.26.0",
@@ -120,7 +120,7 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
# Tests assume that all optional dependencies are installed.
#
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "idna>=2.5"]

CONDITIONAL_REQUIREMENTS["dev"] = (
    CONDITIONAL_REQUIREMENTS["lint"]
@@ -128,7 +128,7 @@ CONDITIONAL_REQUIREMENTS["dev"] = (
    + CONDITIONAL_REQUIREMENTS["test"]
    + [
        # The following are used by the release script
        "click==7.1.2",
        "click==8.1.0",
        "redbaron==0.9.2",
        "GitPython==3.1.14",
        "commonmark==0.9.1",
@@ -153,8 +153,20 @@ setup(
    python_requires="~=3.7",
    entry_points={
        "console_scripts": [
            # Application
            "synapse_homeserver = synapse.app.homeserver:main",
            "synapse_worker = synapse.app.generic_worker:main",
            "synctl = synapse._scripts.synctl:main",
            # Scripts
            "export_signing_key = synapse._scripts.export_signing_key:main",
            "generate_config = synapse._scripts.generate_config:main",
            "generate_log_config = synapse._scripts.generate_log_config:main",
            "generate_signing_key = synapse._scripts.generate_signing_key:main",
            "hash_password = synapse._scripts.hash_password:main",
            "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
            "synapse_port_db = synapse._scripts.synapse_port_db:main",
            "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
            "update_synapse_database = synapse._scripts.update_synapse_database:main",
        ]
    },
    classifiers=[
@@ -167,6 +179,5 @@ setup(
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={"test": TestCommand},
)

@@ -20,7 +20,7 @@ from twisted.internet import protocol
|
||||
from twisted.internet.defer import Deferred
|
||||
|
||||
class RedisProtocol(protocol.Protocol):
|
||||
def publish(self, channel: str, message: bytes): ...
|
||||
def publish(self, channel: str, message: bytes) -> "Deferred[None]": ...
|
||||
def ping(self) -> "Deferred[None]": ...
|
||||
def set(
|
||||
self,
|
||||
@@ -52,11 +52,14 @@ def lazyConnection(
|
||||
convertNumbers: bool = ...,
|
||||
) -> RedisProtocol: ...
|
||||
|
||||
class ConnectionHandler: ...
|
||||
# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies
|
||||
# most methods to it via ConnectionHandler.__getattr__.
|
||||
class ConnectionHandler(RedisProtocol):
|
||||
def disconnect(self) -> "Deferred[None]": ...
|
||||
|
||||
class RedisFactory(protocol.ReconnectingClientFactory):
|
||||
continueTrying: bool
|
||||
handler: RedisProtocol
|
||||
handler: ConnectionHandler
|
||||
pool: List[RedisProtocol]
|
||||
replyTimeout: Optional[int]
|
||||
def __init__(
|
||||
|
||||
+22
-1
@@ -25,6 +25,27 @@ if sys.version_info < (3, 7):
|
||||
print("Synapse requires Python 3.7 or above.")
|
||||
sys.exit(1)
|
||||
|
||||
# Allow using the asyncio reactor via env var.
|
||||
if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
|
||||
try:
|
||||
from incremental import Version
|
||||
|
||||
import twisted
|
||||
|
||||
# We need a bugfix that is included in Twisted 21.2.0:
|
||||
# https://twistedmatrix.com/trac/ticket/9787
|
||||
if twisted.version < Version("Twisted", 21, 2, 0):
|
||||
print("Using asyncio reactor requires Twisted>=21.2.0")
|
||||
sys.exit(1)
|
||||
|
||||
import asyncio
|
||||
|
||||
from twisted.internet import asyncioreactor
|
||||
|
||||
asyncioreactor.install(asyncio.get_event_loop())
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Twisted and canonicaljson will fail to import when this file is executed to
|
||||
# get the __version__ during a fresh install. That's OK and subsequent calls to
|
||||
# actually start Synapse will import these libraries fine.
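One subtlety in the SYNAPSE_ASYNC_IO_REACTOR check above: bool() of the string returned by os.environ.get is True for any non-empty value, so even SYNAPSE_ASYNC_IO_REACTOR=0 enables the asyncio reactor. A quick illustration:

    import os

    for value in ("1", "0", ""):
        os.environ["SYNAPSE_ASYNC_IO_REACTOR"] = value
        enabled = bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False))
        print(repr(value), "->", enabled)  # '1' -> True, '0' -> True, '' -> False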
@@ -47,7 +68,7 @@ try:
except ImportError:
pass

__version__ = "1.54.0rc1"
__version__ = "1.57.1"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

@@ -15,19 +15,19 @@
import argparse
import sys
import time
from typing import Optional
from typing import NoReturn, Optional

import nacl.signing
from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
from signedjson.types import VerifyKey


def exit(status: int = 0, message: Optional[str] = None):
def exit(status: int = 0, message: Optional[str] = None) -> NoReturn:
if message:
print(message, file=sys.stderr)
sys.exit(status)


def format_plain(public_key: nacl.signing.VerifyKey):
def format_plain(public_key: VerifyKey) -> None:
print(
"%s:%s %s"
% (
@@ -38,7 +38,7 @@ def format_plain(public_key: nacl.signing.VerifyKey):
)


def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
def format_for_config(public_key: VerifyKey, expiry_ts: int) -> None:
print(
' "%s:%s": { key: "%s", expired_ts: %i }'
% (
@@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
)


if __name__ == "__main__":
def main() -> None:
parser = argparse.ArgumentParser()

parser.add_argument(
@@ -85,7 +85,6 @@ if __name__ == "__main__":
else format_plain
)

keys = []
for file in args.key_file:
try:
res = read_signing_keys(file)
@@ -95,6 +94,9 @@ if __name__ == "__main__":
message="Error reading key from file %s: %s %s"
% (file.name, type(e), e),
)
res = []
for key in res:
formatter(get_verify_key(key))


if __name__ == "__main__":
main()
@@ -6,7 +6,8 @@ import sys

from synapse.config.homeserver import HomeServerConfig

if __name__ == "__main__":

def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--config-dir",
@@ -76,3 +77,7 @@ if __name__ == "__main__":
shutil.copyfileobj(args.header_file, args.output_file)

args.output_file.write(conf)


if __name__ == "__main__":
main()
@@ -19,7 +19,8 @@ import sys

from synapse.config.logger import DEFAULT_LOG_CONFIG

if __name__ == "__main__":

def main() -> None:
parser = argparse.ArgumentParser()

parser.add_argument(
@@ -42,3 +43,7 @@ if __name__ == "__main__":
out = args.output_file
out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file))
out.flush()


if __name__ == "__main__":
main()
@@ -19,7 +19,8 @@ from signedjson.key import generate_signing_key, write_signing_keys

from synapse.util.stringutils import random_string

if __name__ == "__main__":

def main() -> None:
parser = argparse.ArgumentParser()

parser.add_argument(
@@ -34,3 +35,7 @@ if __name__ == "__main__":
key_id = "a_" + random_string(4)
key = (generate_signing_key(key_id),)
write_signing_keys(args.output_file, key)


if __name__ == "__main__":
main()
@@ -8,11 +8,8 @@ import unicodedata
import bcrypt
import yaml

bcrypt_rounds = 12
password_pepper = ""


def prompt_for_pass():
def prompt_for_pass() -> str:
password = getpass.getpass("Password: ")

if not password:
@@ -26,7 +23,10 @@ def prompt_for_pass():
return password


if __name__ == "__main__":
def main() -> None:
bcrypt_rounds = 12
password_pepper = ""

parser = argparse.ArgumentParser(
description=(
"Calculate the hash of a new password, so that passwords can be reset"
@@ -77,3 +77,7 @@ if __name__ == "__main__":
).decode("ascii")

print(hashed)


if __name__ == "__main__":
main()
@@ -28,7 +28,7 @@ This can be extracted from postgres with::

To use, pipe the above into::

PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

import argparse
@@ -42,7 +42,7 @@ from synapse.rest.media.v1.filepath import MediaFilePaths
logger = logging.getLogger()


def main(src_repo, dest_repo):
def main(src_repo: str, dest_repo: str) -> None:
src_paths = MediaFilePaths(src_repo)
dest_paths = MediaFilePaths(dest_repo)
for line in sys.stdin:
@@ -55,14 +55,19 @@ def main(src_repo, dest_repo):
move_media(parts[0], parts[1], src_paths, dest_paths)


def move_media(origin_server, file_id, src_paths, dest_paths):
def move_media(
origin_server: str,
file_id: str,
src_paths: MediaFilePaths,
dest_paths: MediaFilePaths,
) -> None:
"""Move the given file, and any thumbnails, to the dest repo

Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths):
origin_server:
file_id:
src_paths:
dest_paths:
"""
logger.info("%s/%s", origin_server, file_id)

@@ -91,7 +96,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
)


def mkdir_and_move(original_file, dest_file):
def mkdir_and_move(original_file: str, dest_file: str) -> None:
dirname = os.path.dirname(dest_file)
if not os.path.exists(dirname):
logger.debug("mkdir %s", dirname)
@@ -109,10 +114,9 @@ if __name__ == "__main__":
parser.add_argument("dest_repo", help="Path to source content repo")
args = parser.parse_args()

logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
)

main(args.src_repo, args.dest_repo)
@@ -22,7 +22,7 @@ import logging
import sys
from typing import Callable, Optional

import requests as _requests
import requests
import yaml


@@ -33,7 +33,6 @@ def request_registration(
shared_secret: str,
admin: bool = False,
user_type: Optional[str] = None,
requests=_requests,
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
) -> None:

@@ -138,9 +138,7 @@ def main() -> None:
config_args = parser.parse_args(sys.argv[1:])
config_files = find_config_files(search_paths=config_args.config_path)
config_dict = read_config_files(config_files)
config.parse_config_dict(
config_dict,
)
config.parse_config_dict(config_dict, "", "")

since_ms = time.time() * 1000 - Config.parse_duration(config_args.since)
exclude_users_with_email = config_args.exclude_emails

@@ -21,12 +21,29 @@ import logging
import sys
import time
import traceback
from typing import Dict, Iterable, Optional, Set
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
List,
NoReturn,
Optional,
Set,
Tuple,
Type,
TypeVar,
cast,
)

import yaml
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import TypedDict

from twisted.internet import defer, reactor
from twisted.internet import defer, reactor as reactor_

from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
@@ -35,7 +52,7 @@ from synapse.logging.context import (
make_deferred_yieldable,
run_in_background,
)
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
@@ -66,8 +83,12 @@ from synapse.storage.databases.main.user_directory import (
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.types import ISynapseReactor
from synapse.util import Clock

# Cast safety: Twisted does some naughty magic which replaces the
# twisted.internet.reactor module with a Reactor instance at runtime.
reactor = cast(ISynapseReactor, reactor_)
logger = logging.getLogger("synapse_port_db")
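The cast above is the standard workaround for Twisted's runtime module swap; in isolation the pattern looks like this (a sketch using a generic Twisted interface, since ISynapseReactor is Synapse's own protocol type):

    from typing import cast

    from twisted.internet import reactor as reactor_
    from twisted.internet.interfaces import IReactorCore

    # At runtime twisted.internet.reactor is a Reactor instance rather than a
    # module, so a cast (instead of a blanket type: ignore) gives the rest of
    # the file a usable static type.
    reactor = cast(IReactorCore, reactor_)
    reactor.callWhenRunning(reactor.stop)  # type-checks against IReactorCore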


@@ -97,6 +118,7 @@ BOOLEAN_COLUMNS = {
"users": ["shadow_banned"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
}


@@ -158,12 +180,16 @@ IGNORED_TABLES = {

# Error returned by the run function. Used at the top-level part of the script to
# handle errors and return codes.
end_error = None # type: Optional[str]
end_error: Optional[str] = None
# The exec_info for the error, if any. If error is defined but not exec_info the script
# will show only the error message without the stacktrace, if exec_info is defined but
# not the error then the script will show nothing outside of what's printed in the run
# function. If both are defined, the script will print both the error and the stacktrace.
end_error_exec_info = None
end_error_exec_info: Optional[
Tuple[Type[BaseException], BaseException, TracebackType]
] = None

R = TypeVar("R")
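The new annotation on end_error_exec_info mirrors the declared return type of sys.exc_info(), which is why a later assignment in this diff still needs an ignore comment; a small sketch of the pattern:

    import sys
    from types import TracebackType
    from typing import Optional, Tuple, Type

    exc_info: Optional[Tuple[Type[BaseException], BaseException, TracebackType]] = None
    try:
        raise ValueError("boom")
    except ValueError:
        # Inside an except block the tuple is never (None, None, None), but
        # sys.exc_info() is typed as possibly containing Nones, hence the
        # `type: ignore[assignment]` further down in this diff.
        exc_info = sys.exc_info()  # type: ignore[assignment]
    print(exc_info is not None)  # True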


class Store(
@@ -187,17 +213,19 @@ class Store(
PresenceBackgroundUpdateStore,
GroupServerWorkerStore,
):
def execute(self, f, *args, **kwargs):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

def execute_sql(self, sql, *args):
def r(txn):
def execute_sql(self, sql: str, *args: object) -> Awaitable[List[Tuple]]:
def r(txn: LoggingTransaction) -> List[Tuple]:
txn.execute(sql, args)
return txn.fetchall()

return self.db_pool.runInteraction("execute_sql", r)

def insert_many_txn(self, txn, table, headers, rows):
def insert_many_txn(
self, txn: LoggingTransaction, table: str, headers: List[str], rows: List[Tuple]
) -> None:
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in headers),
@@ -210,14 +238,15 @@ class Store(
logger.exception("Failed to insert: %s", table)
raise

def set_room_is_public(self, room_id, is_public):
# Note: the parent method is an `async def`.
def set_room_is_public(self, room_id: str, is_public: bool) -> NoReturn:
raise Exception(
"Attempt to set room_is_public during port_db: database not empty?"
)


class MockHomeserver:
def __init__(self, config):
def __init__(self, config: HomeServerConfig):
self.clock = Clock(reactor)
self.config = config
self.hostname = config.server.server_name
@@ -225,21 +254,30 @@ class MockHomeserver:
"matrix-synapse"
)

def get_clock(self):
def get_clock(self) -> Clock:
return self.clock

def get_reactor(self):
def get_reactor(self) -> ISynapseReactor:
return reactor

def get_instance_name(self):
def get_instance_name(self) -> str:
return "master"


class Porter(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Porter:
def __init__(
self,
sqlite_config: Dict[str, Any],
progress: "Progress",
batch_size: int,
hs_config: HomeServerConfig,
):
self.sqlite_config = sqlite_config
self.progress = progress
self.batch_size = batch_size
self.hs_config = hs_config
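Porter's rewritten constructor trades self.__dict__.update(kwargs) for explicit, typed attributes; the difference matters because dynamically injected attributes are invisible to mypy. A minimal sketch of the contrast:

    from typing import Any

    class Opaque:
        def __init__(self, **kwargs: Any):
            self.__dict__.update(kwargs)  # attribute names/types unknown to mypy

    class Explicit:
        def __init__(self, batch_size: int):
            self.batch_size = batch_size  # name and type are checkable

    print(Explicit(batch_size=100).batch_size)  # checked; Opaque's attributes are not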

async def setup_table(self, table):
async def setup_table(self, table: str) -> Tuple[str, int, int, int, int]:
if table in APPEND_ONLY_TABLES:
# It's safe to just carry on inserting.
row = await self.postgres_store.db_pool.simple_select_one(
@@ -281,7 +319,7 @@ class Porter(object):
)
else:

def delete_all(txn):
def delete_all(txn: LoggingTransaction) -> None:
txn.execute(
"DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,)
)
@@ -306,7 +344,7 @@ class Porter(object):
async def get_table_constraints(self) -> Dict[str, Set[str]]:
"""Returns a map of tables that have foreign key constraints to tables they depend on."""

def _get_constraints(txn):
def _get_constraints(txn: LoggingTransaction) -> Dict[str, Set[str]]:
# We can pull the information about foreign key constraints out from
# the postgres schema tables.
sql = """
@@ -322,7 +360,7 @@ class Porter(object):
"""
txn.execute(sql)

results = {}
results: Dict[str, Set[str]] = {}
for table, foreign_table in txn:
results.setdefault(table, set()).add(foreign_table)
return results
@@ -332,8 +370,13 @@ class Porter(object):
)

async def handle_table(
self, table, postgres_size, table_size, forward_chunk, backward_chunk
):
self,
table: str,
postgres_size: int,
table_size: int,
forward_chunk: int,
backward_chunk: int,
) -> None:
logger.info(
"Table %s: %i/%i (rows %i-%i) already ported",
table,
@@ -380,7 +423,9 @@ class Porter(object):

while True:

def r(txn):
def r(
txn: LoggingTransaction,
) -> Tuple[Optional[List[str]], List[Tuple], List[Tuple]]:
forward_rows = []
backward_rows = []
if do_forward[0]:
@@ -407,6 +452,7 @@ class Porter(object):
)

if frows or brows:
assert headers is not None
if frows:
forward_chunk = max(row[0] for row in frows) + 1
if brows:
@@ -415,7 +461,8 @@ class Porter(object):
rows = frows + brows
rows = self._convert_rows(table, headers, rows)

def insert(txn):
def insert(txn: LoggingTransaction) -> None:
assert headers is not None
self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)

self.postgres_store.db_pool.simple_update_one_txn(
@@ -437,8 +484,12 @@ class Porter(object):
return

async def handle_search_table(
self, postgres_size, table_size, forward_chunk, backward_chunk
):
self,
postgres_size: int,
table_size: int,
forward_chunk: int,
backward_chunk: int,
) -> None:
select = (
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
" FROM event_search as es"
@@ -449,7 +500,7 @@ class Porter(object):

while True:

def r(txn):
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
txn.execute(select, (forward_chunk, self.batch_size))
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
@@ -463,7 +514,7 @@ class Porter(object):

# We have to treat event_search differently since it has a
# different structure in the two different databases.
def insert(txn):
def insert(txn: LoggingTransaction) -> None:
sql = (
"INSERT INTO event_search (event_id, room_id, key,"
" sender, vector, origin_server_ts, stream_ordering)"
@@ -517,7 +568,7 @@ class Porter(object):
self,
db_config: DatabaseConnectionConfig,
allow_outdated_version: bool = False,
):
) -> Store:
"""Builds and returns a database store using the provided configuration.

Args:
@@ -539,12 +590,13 @@ class Porter(object):
db_conn, allow_outdated_version=allow_outdated_version
)
prepare_database(db_conn, engine, config=self.hs_config)
store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)
# Type safety: ignore that we're using Mock homeservers here.
store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) # type: ignore[arg-type]
db_conn.commit()

return store

async def run_background_updates_on_postgres(self):
async def run_background_updates_on_postgres(self) -> None:
# Manually apply all background updates on the PostgreSQL database.
postgres_ready = (
await self.postgres_store.db_pool.updates.has_completed_background_updates()
@@ -556,12 +608,12 @@ class Porter(object):
self.progress.set_state("Running background updates on PostgreSQL")

while not postgres_ready:
await self.postgres_store.db_pool.updates.do_next_background_update(100)
await self.postgres_store.db_pool.updates.do_next_background_update(True)
postgres_ready = await (
self.postgres_store.db_pool.updates.has_completed_background_updates()
)

async def run(self):
async def run(self) -> None:
"""Ports the SQLite database to a PostgreSQL database.

When a fatal error is met, its message is assigned to the global "end_error"
@@ -597,7 +649,7 @@ class Porter(object):

self.progress.set_state("Creating port tables")

def create_port_table(txn):
def create_port_table(txn: LoggingTransaction) -> None:
txn.execute(
"CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
" table_name varchar(100) NOT NULL UNIQUE,"
@@ -610,7 +662,7 @@ class Porter(object):
# We want people to be able to rerun this script from an old port
# so that they can pick up any missing events that were not
# ported across.
def alter_table(txn):
def alter_table(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE IF EXISTS port_from_sqlite3"
" RENAME rowid TO forward_rowid"
@@ -723,12 +775,16 @@ class Porter(object):
except Exception as e:
global end_error_exec_info
end_error = str(e)
end_error_exec_info = sys.exc_info()
# Type safety: we're in an exception handler, so the exc_info() tuple
# will not be (None, None, None).
end_error_exec_info = sys.exc_info() # type: ignore[assignment]
logger.exception("")
finally:
reactor.stop()

def _convert_rows(self, table, headers, rows):
def _convert_rows(
self, table: str, headers: List[str], rows: List[Tuple]
) -> List[Tuple]:
bool_col_names = BOOLEAN_COLUMNS.get(table, [])

bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names]
@@ -736,7 +792,7 @@ class Porter(object):
class BadValueException(Exception):
pass

def conv(j, col):
def conv(j: int, col: object) -> object:
if j in bool_cols:
return bool(col)
if isinstance(col, bytes):
@@ -762,7 +818,7 @@ class Porter(object):

return outrows

async def _setup_sent_transactions(self):
async def _setup_sent_transactions(self) -> Tuple[int, int, int]:
# Only save things from the last day
yesterday = int(time.time() * 1000) - 86400000

@@ -774,10 +830,10 @@ class Porter(object):
")"
)

def r(txn):
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
txn.execute(select)
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
headers: List[str] = [column[0] for column in txn.description]

ts_ind = headers.index("ts")

@@ -791,7 +847,7 @@ class Porter(object):
if inserted_rows:
max_inserted_rowid = max(r[0] for r in rows)

def insert(txn):
def insert(txn: LoggingTransaction) -> None:
self.postgres_store.insert_many_txn(
txn, "sent_transactions", headers[1:], rows
)
@@ -800,7 +856,7 @@ class Porter(object):
else:
max_inserted_rowid = 0

def get_start_id(txn):
def get_start_id(txn: LoggingTransaction) -> int:
txn.execute(
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
" ORDER BY rowid ASC LIMIT 1",
@@ -825,12 +881,13 @@ class Porter(object):
},
)

def get_sent_table_size(txn):
def get_sent_table_size(txn: LoggingTransaction) -> int:
txn.execute(
"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
)
(size,) = txn.fetchone()
return int(size)
result = txn.fetchone()
assert result is not None
return int(result[0])

remaining_count = await self.sqlite_store.execute(get_sent_table_size)
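The get_sent_table_size change is the usual way to type DB-API cursor reads: fetchone() is Optional, so the old tuple unpacking fails type checking even though COUNT(*) always returns a row. A standalone sketch of the pattern with sqlite3:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.execute("SELECT count(*) FROM sqlite_master")
    result = cur.fetchone()
    # fetchone() may return None in general, so assert narrows the Optional
    # before indexing, exactly as get_sent_table_size now does.
    assert result is not None
    print(int(result[0]))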

@@ -838,25 +895,35 @@ class Porter(object):

return next_chunk, inserted_rows, total_count

async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
frows = await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
async def _get_remaining_count_to_port(
self, table: str, forward_chunk: int, backward_chunk: int
) -> int:
frows = cast(
List[Tuple[int]],
await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
),
)

brows = await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
brows = cast(
List[Tuple[int]],
await self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
),
)

return frows[0][0] + brows[0][0]

async def _get_already_ported_count(self, table):
async def _get_already_ported_count(self, table: str) -> int:
rows = await self.postgres_store.execute_sql(
"SELECT count(*) FROM %s" % (table,)
)

return rows[0][0]

async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
async def _get_total_count_to_port(
self, table: str, forward_chunk: int, backward_chunk: int
) -> Tuple[int, int]:
remaining, done = await make_deferred_yieldable(
defer.gatherResults(
[
@@ -877,14 +944,17 @@ class Porter(object):
return done, remaining + done

async def _setup_state_group_id_seq(self) -> None:
curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)

if not curr_id:
return

def r(txn):
def r(txn: LoggingTransaction) -> None:
assert curr_id is not None
next_id = curr_id + 1
txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))

@@ -895,7 +965,7 @@ class Porter(object):
"setup_user_id_seq", find_max_generated_user_id_localpart
)

def r(txn):
def r(txn: LoggingTransaction) -> None:
next_id = curr_id + 1
txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))

@@ -917,7 +987,7 @@ class Porter(object):
allow_none=True,
)

def _setup_events_stream_seqs_set_pos(txn):
def _setup_events_stream_seqs_set_pos(txn: LoggingTransaction) -> None:
if curr_forward_id:
txn.execute(
"ALTER SEQUENCE events_stream_seq RESTART WITH %s",
@@ -941,17 +1011,20 @@ class Porter(object):
"""Set a sequence to the correct value."""
current_stream_ids = []
for stream_id_table in stream_id_tables:
max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
max_stream_id = cast(
int,
await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
),
)
current_stream_ids.append(max_stream_id)

next_id = max(current_stream_ids) + 1

def r(txn):
def r(txn: LoggingTransaction) -> None:
sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
txn.execute(sql + " %s", (next_id,))

@@ -960,14 +1033,18 @@ class Porter(object):
)

async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
curr_chain_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)

def r(txn):
def r(txn: LoggingTransaction) -> None:
# Presumably there is at least one row in event_auth_chains.
assert curr_chain_id is not None
txn.execute(
"ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
(curr_chain_id + 1,),
@@ -985,15 +1062,22 @@ class Porter(object):
##############################################


class Progress(object):
class TableProgress(TypedDict):
start: int
num_done: int
total: int
perc: int


class Progress:
"""Used to report progress of the port"""

def __init__(self):
self.tables = {}
def __init__(self) -> None:
self.tables: Dict[str, TableProgress] = {}

self.start_time = int(time.time())

def add_table(self, table, cur, size):
def add_table(self, table: str, cur: int, size: int) -> None:
self.tables[table] = {
"start": cur,
"num_done": cur,
@@ -1001,19 +1085,22 @@ class Progress(object):
"perc": int(cur * 100 / size),
}

def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
data = self.tables[table]
data["num_done"] = num_done
data["perc"] = int(num_done * 100 / data["total"])

def done(self):
def done(self) -> None:
pass

def set_state(self, state: str) -> None:
pass


class CursesProgress(Progress):
"""Reports progress to a curses window"""

def __init__(self, stdscr):
def __init__(self, stdscr: "curses.window"):
self.stdscr = stdscr

curses.use_default_colors()
@@ -1022,7 +1109,7 @@ class CursesProgress(Progress):
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)

self.last_update = 0
self.last_update = 0.0

self.finished = False

@@ -1031,7 +1118,7 @@ class CursesProgress(Progress):

super(CursesProgress, self).__init__()

def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
super(CursesProgress, self).update(table, num_done)

self.total_processed = 0
@@ -1042,7 +1129,7 @@ class CursesProgress(Progress):

self.render()

def render(self, force=False):
def render(self, force: bool = False) -> None:
now = time.time()

if not force and now - self.last_update < 0.2:
@@ -1081,8 +1168,7 @@ class CursesProgress(Progress):
left_margin = 5
middle_space = 1

items = self.tables.items()
items = sorted(items, key=lambda i: (i[1]["perc"], i[0]))
items = sorted(self.tables.items(), key=lambda i: (i[1]["perc"], i[0]))

for i, (table, data) in enumerate(items):
if i + 2 >= rows:
@@ -1115,12 +1201,12 @@ class CursesProgress(Progress):
self.stdscr.refresh()
self.last_update = time.time()

def done(self):
def done(self) -> None:
self.finished = True
self.render(True)
self.stdscr.getch()

def set_state(self, state):
def set_state(self, state: str) -> None:
self.stdscr.clear()
self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD)
self.stdscr.refresh()
@@ -1129,7 +1215,7 @@ class CursesProgress(Progress):
class TerminalProgress(Progress):
"""Just prints progress to the terminal"""

def update(self, table, num_done):
def update(self, table: str, num_done: int) -> None:
super(TerminalProgress, self).update(table, num_done)

data = self.tables[table]
@@ -1138,7 +1224,7 @@ class TerminalProgress(Progress):
"%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"])
)

def set_state(self, state):
def set_state(self, state: str) -> None:
print(state + "...")


@@ -1146,7 +1232,7 @@ class TerminalProgress(Progress):
##############################################


if __name__ == "__main__":
def main() -> None:
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
@@ -1178,15 +1264,11 @@ if __name__ == "__main__":

args = parser.parse_args()

logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}

if args.curses:
logging_config["filename"] = "port-synapse.log"

logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
filename="port-synapse.log" if args.curses else None,
)

sqlite_config = {
"name": "sqlite3",
@@ -1216,7 +1298,8 @@ if __name__ == "__main__":
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")

def start(stdscr=None):
def start(stdscr: Optional["curses.window"] = None) -> None:
progress: Progress
if stdscr:
progress = CursesProgress(stdscr)
else:
@@ -1230,7 +1313,7 @@ if __name__ == "__main__":
)

@defer.inlineCallbacks
def run():
def run() -> Generator["defer.Deferred[Any]", Any, None]:
with LoggingContext("synapse_port_db_run"):
yield defer.ensureDeferred(porter.run())

@@ -1251,3 +1334,7 @@ if __name__ == "__main__":
sys.stderr.write(end_error)

sys.exit(5)


if __name__ == "__main__":
main()
@@ -24,7 +24,7 @@ import signal
import subprocess
import sys
import time
from typing import Iterable, Optional
from typing import Iterable, NoReturn, Optional, TextIO

import yaml

@@ -45,7 +45,7 @@ one of the following:
--------------------------------------------------------------------------------"""


def pid_running(pid):
def pid_running(pid: int) -> bool:
try:
os.kill(pid, 0)
except OSError as err:
@@ -68,7 +68,7 @@ def pid_running(pid):
return True


def write(message, colour=NORMAL, stream=sys.stdout):
def write(message: str, colour: str = NORMAL, stream: TextIO = sys.stdout) -> None:
# Lets check if we're writing to a TTY before colouring
should_colour = False
try:
@@ -84,7 +84,7 @@ def write(message, colour=NORMAL, stream=sys.stdout):
stream.write(colour + message + NORMAL + "\n")


def abort(message, colour=RED, stream=sys.stderr):
def abort(message: str, colour: str = RED, stream: TextIO = sys.stderr) -> NoReturn:
write(message, colour, stream)
sys.exit(1)
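Annotating abort() as NoReturn lets the type checker treat everything after a call to it as unreachable, which in turn tightens inference in the callers; a minimal sketch:

    import sys
    from typing import NoReturn

    def abort(message: str) -> NoReturn:
        print(message, file=sys.stderr)
        sys.exit(1)  # always raises SystemExit, so the function never returns

    def get_pid(raw: str) -> int:
        if not raw.isdigit():
            abort("not a pid: " + raw)
        return int(raw)  # only reachable when raw really is numeric

    print(get_pid("42"))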

@@ -166,7 +166,7 @@ Worker = collections.namedtuple(
)


def main():
def main() -> None:

parser = argparse.ArgumentParser()

@@ -16,42 +16,47 @@
import argparse
import logging
import sys
from typing import cast

import yaml
from matrix_common.versionstring import get_distribution_version_string

from twisted.internet import defer, reactor
from twisted.internet import defer, reactor as reactor_

from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.types import ISynapseReactor

# Cast safety: Twisted does some naughty magic which replaces the
# twisted.internet.reactor module with a Reactor instance at runtime.
reactor = cast(ISynapseReactor, reactor_)
logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore
DATASTORE_CLASS = DataStore # type: ignore [assignment]

def __init__(self, config, **kwargs):
def __init__(self, config: HomeServerConfig):
super(MockHomeserver, self).__init__(
config.server.server_name, reactor=reactor, config=config, **kwargs
)

self.version_string = "Synapse/" + get_distribution_version_string(
"matrix-synapse"
hostname=config.server.server_name,
config=config,
reactor=reactor,
version_string="Synapse/"
+ get_distribution_version_string("matrix-synapse"),
)


def run_background_updates(hs):
def run_background_updates(hs: HomeServer) -> None:
store = hs.get_datastores().main

async def run_background_updates():
async def run_background_updates() -> None:
await store.db_pool.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()

def run():
def run() -> None:
# Apply all background updates on the database.
defer.ensureDeferred(
run_as_background_process("background_updates", run_background_updates)
@@ -62,7 +67,7 @@ def run_background_updates(hs):
reactor.run()


def main():
def main() -> None:
parser = argparse.ArgumentParser(
description=(
"Updates a synapse database to the latest schema and optionally runs background updates"
@@ -85,12 +90,10 @@ def main():

args = parser.parse_args()

logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}

logging.basicConfig(**logging_config)
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
)

# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
@@ -23,7 +23,7 @@ from typing_extensions import Final
MAX_PDU_SIZE = 65536

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2 ** 63 - 1
MAX_DEPTH = 2**63 - 1

# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255
@@ -178,7 +178,9 @@ class RelationTypes:
ANNOTATION: Final = "m.annotation"
REPLACE: Final = "m.replace"
REFERENCE: Final = "m.reference"
THREAD: Final = "io.element.thread"
THREAD: Final = "m.thread"
# TODO Remove this in Synapse >= v1.57.0.
UNSTABLE_THREAD: Final = "io.element.thread"


class LimitBlockingTypes:

@@ -88,7 +88,9 @@ ROOM_EVENT_FILTER_SCHEMA = {
"org.matrix.labels": {"type": "array", "items": {"type": "string"}},
"org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
# MSC3440, filtering by event relations.
"related_by_senders": {"type": "array", "items": {"type": "string"}},
"io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
"related_by_rel_types": {"type": "array", "items": {"type": "string"}},
"io.element.relation_types": {"type": "array", "items": {"type": "string"}},
},
}
@@ -318,19 +320,18 @@ class Filter:
self.labels = filter_json.get("org.matrix.labels", None)
self.not_labels = filter_json.get("org.matrix.not_labels", [])

# Ideally these would be rejected at the endpoint if they were provided
# and not supported, but that would involve modifying the JSON schema
# based on the homeserver configuration.
self.related_by_senders = self.filter_json.get("related_by_senders", None)
self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)

# Fallback to the unstable prefix if the stable version is not given.
if hs.config.experimental.msc3440_enabled:
self.relation_senders = self.filter_json.get(
self.related_by_senders = self.related_by_senders or self.filter_json.get(
"io.element.relation_senders", None
)
self.relation_types = self.filter_json.get(
"io.element.relation_types", None
self.related_by_rel_types = (
self.related_by_rel_types
or self.filter_json.get("io.element.relation_types", None)
)
else:
self.relation_senders = None
self.relation_types = None

def filters_all_types(self) -> bool:
return "*" in self.not_types
@@ -461,7 +462,7 @@ class Filter:
event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined]
event_ids_to_keep = set(
await self._store.events_have_relations(
event_ids, self.relation_senders, self.relation_types
event_ids, self.related_by_senders, self.related_by_rel_types
)
)

@@ -474,7 +475,7 @@ class Filter:
async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
result = [event for event in events if self._check(event)]

if self.relation_senders or self.relation_types:
if self.related_by_senders or self.related_by_rel_types:
return await self._check_event_relations(result)

return result

@@ -130,7 +130,7 @@ def start_reactor(
appname: str,
soft_file_limit: int,
gc_thresholds: Optional[Tuple[int, int, int]],
pid_file: str,
pid_file: Optional[str],
daemonize: bool,
print_pidfile: bool,
logger: logging.Logger,
@@ -171,6 +171,8 @@ def start_reactor(
# appearing to go backwards.
with PreserveLoggingContext():
if daemonize:
assert pid_file is not None

if print_pidfile:
print(pid_file)


@@ -33,7 +33,6 @@ from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -61,7 +60,6 @@ class AdminCmdSlavedStore(
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
BaseSlavedStore,
RoomWorkerStore,
):

@@ -53,7 +53,6 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -247,7 +246,6 @@ class GenericWorkerSlavedStore(
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
@@ -322,7 +320,8 @@ class GenericWorkerServer(HomeServer):

presence.register_servlets(self, resource)

groups.register_servlets(self, resource)
if self.config.experimental.groups_enabled:
groups.register_servlets(self, resource)

resources.update({CLIENT_API_PREFIX: resource})
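Group (community) servlets on workers are now registered only when the experimental flag is on; a hypothetical configuration fragment, shown as the parsed dict (the exact key layout under experimental_features is an assumption here):

    # With the flag absent or false, GenericWorkerServer skips
    # groups.register_servlets entirely.
    hs_config = {
        "experimental_features": {
            "groups_enabled": True,  # assumed name behind config.experimental.groups_enabled
        },
    }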

@@ -417,7 +416,7 @@ class GenericWorkerServer(HomeServer):
else:
logger.warning("Unsupported listener type: %s", listener.type)

self.get_tcp_replication().start_replication(self)
self.get_replication_command_handler().start_replication(self)


def start(config_options: List[str]) -> None:
Some files were not shown because too many files have changed in this diff.