
Compare commits


1 commit

Author: Action Bot
SHA1: 4520e0e31e
Message: Version picker added for v1.55 docs
Date: 2023-12-11 14:52:27 +00:00
427 changed files with 16815 additions and 30577 deletions


@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

.ci/patch_for_twisted_trunk.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/sh
# replaces the dependency on Twisted in `python_dependencies` with trunk.
set -e
cd "$(dirname "$0")"/..
sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py


@@ -2,24 +2,29 @@
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
@@ -32,14 +37,14 @@ else
fi
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
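The check itself falls outside the hunk shown above; it is presumably along these lines (a sketch only — the script's actual wording may differ):

```sh
if [ -d "/tmp/export_data2/rooms" ]; then
  echo "Command successful, this test passes"
else
  echo "No rooms directory found. Failing..."
  exit 1
fi
```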


@@ -1,9 +1,6 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# this script is run by GitHub Actions in a plain `focal` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
@@ -12,70 +9,12 @@ set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
python3 python3-dev python3-pip python3-venv \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Pin pyopenssl to 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
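As a worked example, the bound-rewriting expression turns a lower or tilde bound into an exact pin (the sample dependency line is hypothetical):

```sh
echo 'attrs = ">=19.2.0"' | sed -e "s/[~>]=/==/g"
# prints: attrs = "==19.2.0"
```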
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
    data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
    toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
exec tox -e py3-old


@@ -1,37 +1,43 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - sets up synapse and deps
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
@@ -42,12 +48,12 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
.ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml


@@ -4,8 +4,8 @@
# things to include
!docker
!synapse
!MANIFEST.in
!README.rst
!pyproject.toml
!poetry.lock
!setup.py
**/__pycache__


@@ -34,24 +34,32 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# TODO: consider using https://github.com/docker/metadata-action instead of this
# custom magic
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
type=pep440,pattern={{raw}}
run: |
case "${GITHUB_REF}" in
refs/heads/develop)
tag=develop
;;
refs/heads/master|refs/heads/main)
tag=latest
;;
refs/tags/*)
tag=${GITHUB_REF#refs/tags/}
;;
*)
tag=${GITHUB_SHA}
;;
esac
echo "::set-output name=tag::$tag"
- name: Build and push all platforms
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64


@@ -22,7 +22,7 @@ jobs:
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.17'
mdbook-version: '0.4.9'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.


@@ -1,159 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies
on:
schedule:
- cron: 0 7 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
# Upgrade all runtime dependencies only. This is intended to mimic a fresh
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- database: "sqlite"
- database: "postgres"
postgres-version: "14"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
volumes:
- ${{ github.workspace }}:/src
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis
env:
POSTGRES: ${{ matrix.postgres && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
working-directory: /src
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# TODO: run complement (as with twisted trunk, see #12473).
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md


@@ -15,14 +15,25 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: pip install -e .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
runs-on: ubuntu-latest
strategy:
matrix:
toxenv:
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install tox
- run: tox -e ${{ matrix.toxenv }}
lint-crlf:
runs-on: ubuntu-latest
@@ -61,23 +72,23 @@ jobs:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
toxenv: ["py"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
toxenv: "py-noextras"
# Oldest Python with PostgreSQL
- python-version: "3.7"
database: "postgres"
postgres-version: "10"
extras: "all"
toxenv: "py"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
toxenv: "py"
steps:
- uses: actions/checkout@v2
@@ -89,16 +100,17 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: pip install tox
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
- run: tox -e ${{ matrix.toxenv }}
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
@@ -117,7 +129,6 @@ jobs:
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
@@ -125,11 +136,11 @@ jobs:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
with:
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -145,24 +156,23 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since the poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -261,10 +271,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
python-version: "3.9"
- run: .ci/scripts/test_export_data_command.sh
portdb:
@@ -299,10 +308,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
complement:
@@ -354,11 +362,27 @@ jobs:
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse
env:
DOCKER_BUILDKIT: 1
# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles
# Run Complement
- run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:


@@ -6,27 +6,16 @@ on:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
- uses: actions/setup-python@v2
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e mypy
trial:
runs-on: ubuntu-latest
@@ -34,15 +23,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v2
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
python-version: 3.7
- run: .ci/patch_for_twisted_trunk.sh
- run: pip install tox
- run: tox -e py
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Logs are most useful when the command fails, always include them.
@@ -67,23 +55,11 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
run: .ci/patch_for_twisted_trunk.sh
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap

.gitignore vendored

@@ -15,9 +15,6 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry lockfile.
!poetry.lock
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
@@ -33,9 +30,6 @@ __pycache__/
/media_store/
/uploads
# For direnv users
/.envrc
# IDEs
/.idea/
/.ropeproject/

CHANGES.md

File diff suppressed because it is too large

MANIFEST.in Normal file

@@ -0,0 +1,54 @@
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md
recursive-include docs *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
include book.toml
include pyproject.toml
recursive-include changelog.d *
include .flake8
prune .circleci
prune .github
prune .ci
prune contrib
prune debian
prune demo/etc
prune docker
prune stubs


@@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
In Matrix, every user runs one or more Matrix clients, which connect through to
@@ -293,42 +293,39 @@ directory of your choice::
git clone https://github.com/matrix-org/synapse.git
cd synapse
Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::
pip install --user pipx
pipx install poetry
as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::
poetry install --extras "all test"
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,dev]"
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
dependencies into a virtual env. If any dependencies fail to install,
try installing the failing modules individually::
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::
pip install -e "module-name"
poetry run ./demo/start.sh
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
(to stop, you can use ``poetry run ./demo/stop.sh``)
./demo/start.sh
See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
(to stop, you can use `./demo/stop.sh`)
See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
for more information.
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
poetry run synapse_homeserver \
python -m synapse.app.homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
poetry run synapse_homeserver --config-path homeserver.yaml
python -m synapse.app.homeserver --config-path homeserver.yaml
Running the unit tests
@@ -337,7 +334,7 @@ Running the unit tests
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
poetry run trial tests
trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::
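   (counts below are illustrative, not the real totals)

   Ran 1337 tests in 716.064s

   PASSED (skips=15, successes=1322)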


@@ -34,6 +34,14 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"


@@ -66,18 +66,6 @@
],
"title": "Dashboards",
"type": "dashboards"
},
{
"asDropdown": false,
"icon": "external link",
"includeVars": false,
"keepTime": false,
"tags": [],
"targetBlank": true,
"title": "Synapse Documentation",
"tooltip": "Open Documentation",
"type": "link",
"url": "https://matrix-org.github.io/synapse/latest/"
}
],
"panels": [
@@ -10901,4 +10889,4 @@
"title": "Synapse",
"uid": "000000012",
"version": 100
}
}


@@ -193,15 +193,12 @@ class TrivialXmppClient:
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)


@@ -30,23 +30,9 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
;;
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
poetry export \
--extras all \
--extras test \
--extras systemd \
-o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
@@ -55,11 +41,9 @@ dh_virtualenv \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
--extras="all,systemd,test"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"

debian/changelog vendored

@@ -1,81 +1,3 @@
matrix-synapse-py3 (1.59.0) stable; urgency=medium
* New Synapse release 1.59.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 May 2022 10:26:50 +0100
matrix-synapse-py3 (1.59.0~rc2) stable; urgency=medium
* New Synapse release 1.59.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 16 May 2022 12:52:15 +0100
matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
* Adjust how the `exported-requirements.txt` file is generated as part of
the process of building these packages. This affects the package
maintainers only; end-users are unaffected.
* New Synapse release 1.59.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 May 2022 10:45:08 +0100
matrix-synapse-py3 (1.58.1) stable; urgency=medium
* Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
were incorrectly omitted from the 1.58.0 package.
* New Synapse release 1.58.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 05 May 2022 14:58:23 +0100
matrix-synapse-py3 (1.58.0) stable; urgency=medium
* New Synapse release 1.58.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 May 2022 10:52:58 +0100
matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium
* New Synapse release 1.58.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 17:14:56 +0100
matrix-synapse-py3 (1.58.0~rc1) stable; urgency=medium
* Use poetry to manage the bundled virtualenv included with this package.
* New Synapse release 1.58.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Apr 2022 11:15:20 +0100
matrix-synapse-py3 (1.57.1) stable; urgency=medium
* New synapse release 1.57.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Apr 2022 15:27:21 +0100
matrix-synapse-py3 (1.57.0) stable; urgency=medium
* New synapse release 1.57.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Apr 2022 10:58:42 +0100
matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium
* New synapse release 1.57.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Apr 2022 13:36:25 +0100
matrix-synapse-py3 (1.56.0) stable; urgency=medium
* New synapse release 1.56.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Apr 2022 12:38:39 +0100
matrix-synapse-py3 (1.56.0~rc1) stable; urgency=medium
* New synapse release 1.56.0~rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Mar 2022 10:40:50 +0100
matrix-synapse-py3 (1.55.2) stable; urgency=medium
* New synapse release 1.55.2.

debian/clean vendored

@@ -1 +0,0 @@
exported_requirements.txt


@@ -38,7 +38,6 @@ for port in 8080 8081 8082; do
printf '\n\n# Customisation made by demo/start.sh\n\n'
echo "public_baseurl: http://localhost:$port/"
echo 'enable_registration: true'
echo 'enable_registration_without_verification: true'
echo ''
# Warning, this heredoc depends on the interaction of tabs and spaces.


@@ -1,4 +1,3 @@
# syntax=docker/dockerfile:1
# Dockerfile to build the matrixdotorg/synapse docker images.
#
# Note that it uses features which are only available in BuildKit - see
@@ -15,61 +14,20 @@
# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 .
#
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but there are known bugs
# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
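In shell terms, the pattern those comments describe is roughly the following sketch (not the Dockerfile's literal `RUN` lines):

```sh
poetry export --extras all -o requirements.txt
pip install --prefix="/install" --no-deps -r requirements.txt
```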
ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
###
### Stage 1: builder
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
#
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt, to improve rebuild speeds on
# slow connections.
#
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -87,25 +45,30 @@ RUN \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Copy just what we need to pip install
COPY MANIFEST.in README.rst setup.py /synapse/
COPY synapse/__init__.py /synapse/synapse/__init__.py
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# the whole synapse project so that this layer in the Docker cache can be
# used while you develop on the source
#
# This is aiming at installing the `[tool.poetry.dependencies]` from pyproject.toml.
COPY --from=requirements /synapse/requirements.txt /synapse/
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt
pip install --prefix="/install" --no-warn-script-location \
/synapse[all]
# Copy over the rest of the synapse source code.
# Copy over the rest of the project
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
# Install the synapse package itself and all of its children packages.
#
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 2: runtime
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim

docker/Dockerfile-pgtests Normal file

@@ -0,0 +1,30 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:focal
# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox
# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]
USER user
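A plausible build invocation for this image, given the `UID`/`GID` build args above (the tag name is hypothetical):

```sh
docker build -f docker/Dockerfile-pgtests -t synapse-pg-tests \
  --build-arg UID=$(id -u) --build-arg GID=$(id -g) .
```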


@@ -2,36 +2,25 @@
FROM matrixdotorg/synapse
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
redis-server nginx-light
RUN apt-get update
RUN apt-get install -y supervisor redis nginx
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
# Disable the default nginx sites
# Remove the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Expose nginx listener port
EXPOSE 8080/tcp
# Volume for user-editable config files, logs etc.
VOLUME ["/data"]
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh


@@ -10,10 +10,10 @@ Note that running Synapse's unit tests from within the docker image is not suppo
## Testing with SQLite and single-process Synapse
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process of Synapse against Complement.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
@@ -26,22 +26,23 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement.
Next, build the Synapse image for Complement. You will need a local checkout
of Complement. Change to the root of your Complement checkout and run:
```sh
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
```
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
@@ -54,7 +55,7 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
@@ -63,20 +64,21 @@ docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
This will build an image with the tag` matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for testing against locally. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`.
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
the root of your Complement checkout and run:
```sh
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
```
This will build an image with the tag `complement-synapse-workers`, which can be handed to
This will build an image with the tag `complement-synapse`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
@@ -89,10 +91,10 @@ bundling all necessary components together for a workerised homeserver instance.
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
a redis for worker communication and a supervisord instance to start up and monitor all
processes. You will need to provide your own postgres container to connect to, and TLS
is not handled by the container.
Once you've built the image using the above instructions, you can run it. Be sure
you've set up a volume according to the [usual Synapse docker instructions](README.md).
Then run something along the lines of:
@@ -110,7 +112,7 @@ docker run -d --name synapse \
matrixdotorg/synapse-workers
```
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
@@ -128,11 +130,11 @@ Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no
(leaving only the main process). The container is configured to use redis-based worker
mode.
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
after the timestamp.
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
00, according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.


@@ -1,22 +0,0 @@
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
ENV SERVER_NAME=localhost
COPY conf/* /conf/
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
WORKDIR /data
EXPOSE 8008 8448
ENTRYPOINT ["/conf/start.sh"]
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -1 +0,0 @@
Stuff for building the docker image used for testing under complement.


@@ -1,50 +0,0 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
# custom CA.
# We include this near the top of the file in order to cache the result.
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf
# Copy the entrypoint
COPY conf-workers/start-complement-synapse-workers.sh /
# Expose caddy's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start-complement-synapse-workers.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh


@@ -1,72 +0,0 @@
{
"apps": {
"http": {
"servers": {
"srv0": {
"listen": [
":8448"
],
"routes": [
{
"match": [
{
"host": [
"{{ server_name }}"
]
}
],
"handle": [
{
"handler": "subroute",
"routes": [
{
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "localhost:8008"
}
]
}
]
}
]
}
],
"terminal": true
}
]
}
}
},
"tls": {
"automation": {
"policies": [
{
"subjects": [
"{{ server_name }}"
],
"issuers": [
{
"module": "internal"
}
],
"on_demand": true
}
]
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
}
}
}
}
}
}


@@ -1,7 +0,0 @@
[program:caddy]
command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0


@@ -1,16 +0,0 @@
[program:postgres]
command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
# Lower priority number = starts first
priority=1
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
# Use 'Fast Shutdown' mode which aborts current transactions and closes connections quickly.
# (Default (TERM) is 'Smart Shutdown' which stops accepting new connections but
# lets existing connections close gracefully.)
stopsignal=INT


@@ -1,44 +0,0 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py


@@ -1,72 +0,0 @@
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
## Federation ##
# trust certs signed by Complement's CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
federation_ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@@ -1,117 +0,0 @@
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# which is a fundamental necessity in complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"


@@ -1,24 +0,0 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false


@@ -1,30 +0,0 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file, then re-add the AS_REGISTRATION_FILES
# marker so the next iteration of this loop can insert the next application service
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"
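For illustration only, the marker-splicing above could be expressed as a short Python sketch (hypothetical; the image itself uses the sed loop):
```python
import glob

# Build one " - <path>" entry per appservice registration file and substitute
# the list for the AS_REGISTRATION_FILES marker in the generated config.
with open("/conf/homeserver.yaml") as f:
    config = f.read()

entries = "\n".join(
    f" - {path}" for path in sorted(glob.glob("/complement/appservice/*.yaml"))
)

with open("/conf/homeserver.yaml", "w") as f:
    f.write(config.replace("AS_REGISTRATION_FILES", entries))
```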

View File

@@ -5,11 +5,8 @@
nodaemon=true
user=root
[include]
files = /etc/supervisor/conf.d/*.conf
[program:nginx]
command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;"
command=/usr/sbin/nginx -g "daemon off;"
priority=500
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -19,7 +16,7 @@ username=www-data
autorestart=true
[program:redis]
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -29,7 +26,7 @@ username=redis
autorestart=true
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory

View File

@@ -2,7 +2,11 @@ version: 1
formatters:
precise:
{% if worker_name %}
format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}
handlers:
{% if LOG_FILE_PATH %}

View File

@@ -29,7 +29,6 @@
import os
import subprocess
import sys
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
import jinja2
import yaml
@@ -37,7 +36,7 @@ import yaml
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
WORKERS_CONFIG = {
"pusher": {
"app": "synapse.app.pusher",
"listener_resources": [],
@@ -69,10 +68,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.appservice",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
"shared_extra_conf": {"notify_appservices": False},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -171,7 +170,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
command=/usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
@@ -201,7 +200,7 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
def log(txt: str):
"""Log something to the stdout.
Args:
@@ -210,7 +209,7 @@ def log(txt: str) -> None:
print(txt)
def error(txt: str) -> NoReturn:
def error(txt: str):
"""Log something and exit with an error code.
Args:
@@ -220,7 +219,7 @@ def error(txt: str) -> NoReturn:
sys.exit(2)
def convert(src: str, dst: str, **template_vars: object) -> None:
def convert(src: str, dst: str, **template_vars):
"""Generate a file from a template
Args:
@@ -290,7 +289,7 @@ def add_sharding_to_shared_config(
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
def generate_base_homeserver_config() -> None:
def generate_base_homeserver_config():
"""Starts Synapse and generates a basic homeserver config, which will later be
modified for worker support.
@@ -302,15 +301,13 @@ def generate_base_homeserver_config() -> None:
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
def generate_worker_files(
environ: Mapping[str, str], config_path: str, data_dir: str
) -> None:
def generate_worker_files(environ, config_path: str, data_dir: str):
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
environ: _Environ[str]
config_path: Where to output the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
@@ -323,8 +320,7 @@ def generate_worker_files(
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result to the shared
# config file.
# another listener for replication. Later we'll write out the result.
listeners = [
{
"port": 9093,
@@ -343,7 +339,7 @@ def generate_worker_files(
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
shared_config = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
@@ -359,7 +355,7 @@ def generate_worker_files(
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
nginx_upstreams = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
@@ -371,13 +367,13 @@ def generate_worker_files(
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types_env is None:
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types is None:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma
worker_types = worker_types_env.split(",")
worker_types = worker_types.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -388,11 +384,7 @@ def generate_worker_files(
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
worker_type_counter = {}
# For each worker type specified by the user, create config values
for worker_type in worker_types:
@@ -412,14 +404,12 @@ def generate_worker_files(
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
{"name": worker_name, "port": worker_port, "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
@@ -448,7 +438,21 @@ def generate_worker_files(
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
# Then a worker config file
convert(
@@ -471,10 +475,15 @@ def generate_worker_files(
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
# At the same time, prepare a list of internal endpoints to healthcheck
# starting with the main process which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += " server localhost:%d;\n" % (port,)
healthcheck_urls.append("http://localhost:%d/health" % (port,))
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -484,10 +493,6 @@ def generate_worker_files(
# Finally, we'll write out the config files.
# log config for the master process
master_log_config = generate_worker_log_config(environ, "master", data_dir)
shared_config["log_config"] = master_log_config
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
@@ -504,10 +509,9 @@ def generate_worker_files(
)
# Supervisord config
os.makedirs("/etc/supervisor", exist_ok=True)
convert(
"/conf/supervisord.conf.j2",
"/etc/supervisor/supervisord.conf",
"/etc/supervisor/conf.d/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
@@ -525,31 +529,15 @@ def generate_worker_files(
os.mkdir(log_dir)
def generate_worker_log_config(
environ: Mapping[str, str], worker_name: str, data_dir: str
) -> str:
"""Generate a log.config file for the given worker.
def start_supervisord():
"""Starts up supervisord which then starts and monitors all other necessary processes
Returns: the path to the generated file
Raises: CalledProcessError if calling start.py returns a non-zero exit code.
"""
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
return log_config_filepath
subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
def main(args, environ):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
@@ -576,13 +564,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
os.execl(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
)
start_supervisord()
if __name__ == "__main__":

View File

@@ -1,12 +0,0 @@
#!/bin/bash
#
# Prefixes all lines on stdout and stderr with the process name (as determined by
# the SUPERVISOR_PROCESS_NAME env var, which is automatically set by Supervisor).
#
# Usage:
# prefix-log command [args...]
#
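# Each redirection below routes the stream through an awk co-process that
# prepends "<process name> | " to every line before writing it back out.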
exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1)
exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2)
exec "$@"

19
docker/run_pg_tests.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Start the database
sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=./.tox-pg-container -e py37-postgres "$@"

View File

@@ -6,28 +6,27 @@ import os
import platform
import subprocess
import sys
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional
import jinja2
# Utility functions
def log(txt: str) -> None:
def log(txt):
print(txt, file=sys.stderr)
def error(txt: str) -> NoReturn:
def error(txt):
log(txt)
sys.exit(2)
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
def convert(src, dst, environ):
"""Generate a file from a template
Args:
src: path to input file
dst: path to file to write
environ: environment dictionary, for replacement mappings.
src (str): path to input file
dst (str): path to file to write
environ (dict): environment dictionary, for replacement mappings.
"""
with open(src) as infile:
template = infile.read()
@@ -36,30 +35,25 @@ def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
outfile.write(rendered)
def generate_config_from_template(
config_dir: str,
config_path: str,
os_environ: Mapping[str, str],
ownership: Optional[str],
) -> None:
def generate_config_from_template(config_dir, config_path, environ, ownership):
"""Generate a homeserver.yaml from environment variables
Args:
config_dir: where to put generated config files
config_path: where to put the main config file
os_environ: environment mapping
ownership: "<user>:<group>" string which will be used to set
config_dir (str): where to put generated config files
config_path (str): where to put the main config file
environ (dict): environment dictionary
ownership (str|None): "<user>:<group>" string which will be used to set
ownership of the generated configs. If None, ownership will not change.
"""
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in os_environ:
if v not in environ:
error(
"Environment variable '%s' is mandatory when generating a config file."
% (v,)
)
# populate some params from data files (if they exist, else create new ones)
environ: Dict[str, Any] = dict(os_environ)
environ = environ.copy()
secrets = {
"registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
"macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
@@ -114,7 +108,7 @@ def generate_config_from_template(
# Hopefully we already have a signing key, but generate one if not.
args = [
sys.executable,
"python",
"-m",
"synapse.app.homeserver",
"--config-path",
@@ -133,12 +127,12 @@ def generate_config_from_template(
subprocess.check_output(args)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
def run_generate_config(environ, ownership):
"""Run synapse with a --generate-config param to generate a template config file
Args:
environ: env vars from `os.environ`.
ownership: "userid:groupid" arg for chmod. If None, ownership will not change.
environ (dict): env var dict
ownership (str|None): "userid:groupid" arg for chmod. If None, ownership will not change.
Never returns.
"""
@@ -164,7 +158,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
# generate the main config file, and a signing key.
args = [
sys.executable,
"python",
"-m",
"synapse.app.homeserver",
"--server-name",
@@ -181,10 +175,10 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
os.execv(sys.executable, args)
os.execv("/usr/local/bin/python", args)
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
def main(args, environ):
mode = args[1] if len(args) > 1 else "run"
# if we were given an explicit user to switch to, do so
@@ -260,12 +254,12 @@ running with 'migrate_config'. See the README for more details.
log("Starting synapse with args " + " ".join(args))
args = [sys.executable] + args
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve(sys.executable, args, environ)
os.execve("/usr/local/bin/python", args, environ)
if __name__ == "__main__":

View File

@@ -17,7 +17,6 @@
# Usage
- [Federation](federate.md)
- [Configuration](usage/configuration/README.md)
- [Configuration Manual](usage/configuration/config_documentation.md)
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
- [Structured Logging](structured_logging.md)
@@ -46,7 +45,6 @@
- [Account validity callbacks](modules/account_validity_callbacks.md)
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
- [Background update controller callbacks](modules/background_update_controller_callbacks.md)
- [Account data callbacks](modules/account_data_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)

View File

@@ -804,7 +804,7 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
"devices": [
"QBUAZIFURK",
"AUIECTSRND"
]
],
}
```
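For reference, the endpoint can be exercised with a minimal Python sketch (hypothetical homeserver URL and admin token; any HTTP client works):
```python
import requests

# Substitute your homeserver URL, admin access token and target user ID.
resp = requests.post(
    "https://matrix.example.com/_synapse/admin/v2/users/@alice:example.com/delete_devices",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"devices": ["QBUAZIFURK", "AUIECTSRND"]},
)
resp.raise_for_status()
```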

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1 +0,0 @@
This directory contains changelogs for previous years.

View File

@@ -6,36 +6,62 @@ The Synapse codebase uses a number of code formatting tools in order to
quickly and automatically check for formatting (and sometimes logical)
errors in code.
The necessary tools are:
The necessary tools are detailed below.
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
Install them with:
First install them with:
```sh
pip install -e ".[lint,mypy]"
```
The easiest way to run the lints is to invoke the linter script as follows.
- **black**
```sh
scripts-dev/lint.sh
```
The Synapse codebase uses [black](https://pypi.org/project/black/)
as an opinionated code formatter, ensuring all committed code is
properly formatted.
Have `black` auto-format your code (it shouldn't change any
functionality) with:
```sh
black . --exclude="\.tox|build|env"
```
- **flake8**
`flake8` is a code checking tool. We require code to pass `flake8`
before being merged into the codebase.
Check all application and test code with:
```sh
flake8 synapse tests
```
- **isort**
`isort` ensures imports are nicely formatted, and can suggest and
auto-fix issues such as double-importing.
Auto-fix imports with:
```sh
isort -rc synapse tests
```
`-rc` means to recursively search the given directories.
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
development workflow. It is not, however, recommended to run `flake8` or `mypy`
on save as they take a while and can be very resource intensive.
development workflow. It is not, however, recommended to run `flake8` on
save as it takes a while and is very resource intensive.
## General rules
- **Naming**:
- Use `CamelCase` for class and type names
- Use underscores for `function_names` and `variable_names`.
- Use camel case for class and type names
- Use underscores for functions and variables.
- **Docstrings**: should follow the [google code
style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
See the
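As a quick, hypothetical illustration of the naming and docstring rules above:
```python
class MessageRouter:  # classes and types use CamelCase
    """Routes messages to their destination homeserver.

    Attributes:
        max_retries: number of delivery attempts before giving up.
    """

    def __init__(self, max_retries: int) -> None:
        self.max_retries = max_retries  # variables use underscores

    def route_message(self, destination_name: str) -> bool:
        """Attempt delivery to `destination_name`.

        Returns:
            True if delivery should be attempted.
        """
        return self.max_retries > 0
```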

View File

@@ -48,28 +48,19 @@ can find many good git tutorials on the web.
# 4. Install the dependencies
Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should install `poetry`.
Of their installation methods, we recommend
[installing `poetry` using `pipx`](https://python-poetry.org/docs/#installing-with-pipx),
```shell
pip install --user pipx
pipx install poetry
```
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Next, open a terminal and install dependencies as follows:
Once you have installed Python 3 and added the source, please open a terminal and
set up a *virtualenv*, as follows:
```sh
cd path/where/you/have/cloned/the/repository
poetry install --extras all
python3 -m venv ./env
source ./env/bin/activate
pip install wheel
pip install -e ".[all,dev]"
pip install tox
```
This will install the runtime and developer dependencies for the project.
This will install the developer dependencies for the project.
# 5. Get in touch.
@@ -126,10 +117,11 @@ The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
The linter takes no time at all to run as soon as you've [downloaded the dependencies](#4-install-the-dependencies).
The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies).
```sh
poetry run ./scripts-dev/lint.sh
source ./env/bin/activate
./scripts-dev/lint.sh
```
Note that this script *will modify your files* to fix styling errors.
@@ -139,13 +131,15 @@ If you wish to restrict the linters to only the files changed since the last com
(much faster!), you can instead run:
```sh
poetry run ./scripts-dev/lint.sh -d
source ./env/bin/activate
./scripts-dev/lint.sh -d
```
Or if you know exactly which files you wish to lint, you can instead run:
```sh
poetry run ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
## Run the unit tests (Twisted trial).
@@ -154,14 +148,16 @@ The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.
```sh
poetry run trial tests
source ./env/bin/activate
trial tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
```sh
poetry run trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```
If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
@@ -173,7 +169,7 @@ less _trial_temp/test.log
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG poetry run trial tests
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
```
By default, tests will use an in-memory SQLite database for test data. For additional
@@ -184,7 +180,7 @@ database state to be stored in a file named `test.db` under the trial process'
working directory. Typically, this ends up being `_trial_temp/test.db`. For example:
```sh
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 poetry run trial tests
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests
```
The database file can then be inspected with:
@@ -210,10 +206,9 @@ To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:
- `SYNAPSE_POSTGRES` to anything nonempty
- `SYNAPSE_POSTGRES_HOST` (optional if it's the default: UNIX socket)
- `SYNAPSE_POSTGRES_PORT` (optional if it's the default: 5432)
- `SYNAPSE_POSTGRES_USER` (optional if using a UNIX socket)
- `SYNAPSE_POSTGRES_PASSWORD` (optional if using a UNIX socket)
- `SYNAPSE_POSTGRES_HOST`
- `SYNAPSE_POSTGRES_USER`
- `SYNAPSE_POSTGRES_PASSWORD`
For example:
@@ -225,12 +220,26 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
trial
```
You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).
#### Prebuilt container
Your Postgres account needs to be able to create databases.
Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
Docker container to set up PostgreSQL and run our tests for us. To do so, run
```shell
scripts-dev/test_postgresql.sh
```
Any extra arguments to the script will be passed to `tox` and then to `trial`,
so we can run a specific test in this container with e.g.
```shell
scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
```
The container creates a folder in your Synapse checkout called
`.tox-pg-container` and uses this as a tox environment. The output of any
`trial` runs goes into `_trial_temp` in your synapse source directory — the same
as running `trial` directly on your host machine.
## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
@@ -245,14 +254,8 @@ configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
This configuration should generally cover your needs.
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
@@ -270,13 +273,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
```
To run a specific test, you can specify the whole name structure:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
```

View File

@@ -1,239 +0,0 @@
# Managing dependencies with Poetry
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
Some of these are direct dependencies, listed in `pyproject.toml` under the
`[tool.poetry.dependencies]` section. The rest are transitive dependencies (the
things that our direct dependencies themselves depend on, and so on recursively.)
We maintain a locked list of all our dependencies (transitive included) so that
we can track exactly which version of each dependency appears in a given release.
See [here](https://github.com/matrix-org/synapse/issues/11537#issue-1074469665)
for discussion of why we wanted this for Synapse. We chose to use
[`poetry`](https://python-poetry.org/) to manage this locked list; see
[this comment](https://github.com/matrix-org/synapse/issues/11537#issuecomment-1015975819)
for the reasoning.
The locked dependencies get included in our "self-contained" releases: namely,
our docker images and our debian packages. We also use the locked dependencies
in development and our continuous integration.
Separately, our "broad" dependencies—the version ranges specified in
`pyproject.toml`—are included as metadata in our "sdists" and "wheels" [uploaded
to PyPI](https://pypi.org/project/matrix-synapse). Installing from PyPI or from
the Synapse source tree directly will _not_ use the locked dependencies; instead,
they'll pull in the latest version of each package available at install time.
## Example dependency
An example may help. We have a broad dependency on
[`phonenumbers`](https://pypi.org/project/phonenumbers/), as declared in
this snippet from pyproject.toml [as of Synapse 1.57](
https://github.com/matrix-org/synapse/blob/release-v1.57/pyproject.toml#L133
):
```toml
[tool.poetry.dependencies]
# ...
phonenumbers = ">=8.2.0"
```
In our lockfile this is
[pinned]( https://github.com/matrix-org/synapse/blob/dfc7646504cef3e4ff396c36089e1c6f1b1634de/poetry.lock#L679-L685)
to version 8.12.44, even though
[newer versions are available](https://pypi.org/project/phonenumbers/#history).
```toml
[[package]]
name = "phonenumbers"
version = "8.12.44"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
python-versions = "*"
```
The lockfile also includes a
[cryptographic checksum](https://github.com/matrix-org/synapse/blob/release-v1.57/poetry.lock#L2178-L2181)
of the sdists and wheels provided for this version of `phonenumbers`.
```toml
[metadata.files]
# ...
phonenumbers = [
{file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
{file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
]
```
We can see this pinned version inside the docker image for that release:
```
$ docker pull matrixdotorg/synapse:v1.57.0
...
$ docker run --entrypoint pip matrixdotorg/synapse:v1.57.0 show phonenumbers
Name: phonenumbers
Version: 8.12.44
Summary: Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.
Home-page: https://github.com/daviddrysdale/python-phonenumbers
Author: David Drysdale
Author-email: dmd@lurklurk.org
License: Apache License 2.0
Location: /usr/local/lib/python3.9/site-packages
Requires:
Required-by: matrix-synapse
```
Whereas the wheel metadata just contains the broad dependencies:
```
$ cd /tmp
$ wget https://files.pythonhosted.org/packages/ca/5e/d722d572cc5b3092402b783d6b7185901b444427633bd8a6b00ea0dd41b7/matrix_synapse-1.57.0rc1-py3-none-any.whl
...
$ unzip -c matrix_synapse-1.57.0rc1-py3-none-any.whl matrix_synapse-1.57.0rc1.dist-info/METADATA | grep phonenumbers
Requires-Dist: phonenumbers (>=8.2.0)
```
# Tooling recommendation: direnv
[`direnv`](https://direnv.net/) is a tool for activating environments in your
shell inside a given directory. Its support for poetry is unofficial (a
community wiki recipe only), but works solidly in our experience. We thoroughly
recommend it for daily use. To use it:
1. [Install `direnv`](https://direnv.net/docs/installation.html) - it's likely
packaged for your system already.
2. Teach direnv about poetry. The [shell config here](https://github.com/direnv/direnv/wiki/Python#poetry)
needs to be added to `~/.config/direnv/direnvrc` (or more generally `$XDG_CONFIG_HOME/direnv/direnvrc`).
3. Mark the synapse checkout as a poetry project: `echo layout poetry > .envrc`.
4. Convince yourself that you trust this `.envrc` configuration and project.
Then formally confirm this to `direnv` by running `direnv allow`.
Then whenever you navigate to the synapse checkout, you should be able to run
e.g. `mypy` instead of `poetry run mypy`; `python` instead of
`poetry run python`; and your shell commands will automatically run in the
context of poetry's venv, without having to run `poetry shell` beforehand.
# How do I...
## ...reset my venv to the locked environment?
```shell
poetry install --extras all --remove-untracked
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
To avoid typing `poetry run` all the time, you can run `poetry shell`
to start a new shell in the poetry virtualenv context. Within `poetry shell`,
`python`, `pip`, `mypy`, `trial`, etc. are all run inside the project virtualenv
and isolated from the rest of the system.
Roughly speaking, the translation from a traditional virtualenv is:
- `env/bin/activate` -> `poetry shell`, and
- `deactivate` -> close the terminal (Ctrl-D, `exit`, etc.)
See also the direnv recommendation above, which makes `poetry run` and
`poetry shell` unnecessary.
## ...inspect the `poetry` virtualenv?
Some suggestions:
```shell
# Current env only
poetry env info
# All envs: this allows you to have e.g. a poetry managed venv for Python 3.7,
# and another for Python 3.10.
poetry env list --full-path
poetry run pip list
```
Note that `poetry show` describes the abstract *lock file* rather than your
on-disk environment. With that said, `poetry show --tree` can sometimes be
useful.
## ...add a new dependency?
Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
## ...remove a dependency?
This is not done often and is untested, but
```shell
poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`.
Include the updated `pyproject.toml` and `poetry.lock` in your commit.
## ...update a dependency in the locked environment?
Use
```shell
poetry update packagename
```
to use the latest version of `packagename` in the locked environment, without
affecting the broad dependencies listed in the wheel.
There doesn't seem to be a way to do this whilst locking a _specific_ version of
`packagename`. We can work around this (crudely) as follows:
```shell
poetry add packagename==1.2.3
# This should update poetry.lock.
# Now undo the changes to pyproject.toml. For example
# git restore pyproject.toml
# Get poetry to recompute the content-hash of pyproject.toml without changing
# the locked package versions.
poetry lock --no-update
```
Either way, include the updated `poetry.lock` file in your commit.
## ...export a `requirements.txt` file?
```shell
poetry export --extras all
```
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
## ...build a test wheel?
I usually use
```shell
poetry run pip install build && poetry run python -m build
```
because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.

View File

@@ -39,8 +39,7 @@ yet correlated to the DAG.
Outliers typically arise when we fetch the auth chain or state for a given
event. When that happens, we just grab the events in the state/auth chain,
without calculating the state at those events, or backfilling their
`prev_events`. Since we don't have the state at any events fetched in that
way, we mark them as outliers.
`prev_events`.
So, typically, we won't have the `prev_events` of an `outlier` in the database,
(though it's entirely possible that we *might* have them for some other

View File

@@ -17,6 +17,9 @@ follows:
}
```
Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.
The `token` field should include the JSON web token with the following claims:
* A claim that encodes the local part of the user ID is required. By default,
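For illustration, a token carrying the default `sub` (subject) claim can be minted with the `pyjwt` library (a sketch, assuming an HS256 shared secret that matches the homeserver's JWT configuration):
```python
import jwt  # pyjwt

# Hypothetical shared secret; "alice" becomes the localpart of the user ID.
token = jwt.encode({"sub": "alice"}, "shared-secret", algorithm="HS256")
print(token)  # goes in the "token" field of the login request
```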

View File

@@ -1,106 +0,0 @@
# Account data callbacks
Account data callbacks allow module developers to react to changes of the account data
of local users. Account data callbacks can be registered using the module API's
`register_account_data_callbacks` method.
## Callbacks
The available account data callbacks are:
### `on_account_data_updated`
_First introduced in Synapse v1.57.0_
```python
async def on_account_data_updated(
user_id: str,
room_id: Optional[str],
account_data_type: str,
content: "synapse.module_api.JsonDict",
) -> None:
```
Called after a user's account data has been updated. The module is given the
Matrix ID of the user whose account data is changing, the room ID the data is associated
with, the type associated with the change, as well as the new content. If the account
data is not associated with a specific room, then the room ID is `None`.
This callback is triggered when new account data is added or when the data associated with
a given type (and optionally room) changes. This includes deletion, since in Matrix,
deleting account data consists of replacing the data associated with a given type
(and optionally room) with an empty dictionary (`{}`).
Note that this doesn't trigger when changing the tags associated with a room, as these are
processed separately by Synapse.
If multiple modules implement this callback, Synapse runs them all in order.
## Example
The example below is a module that implements the `on_account_data_updated` callback, and
sends an event to an audit room when a user changes their account data.
```python
import json
import attr
from typing import Any, Dict, Optional
from synapse.module_api import JsonDict, ModuleApi
from synapse.module_api.errors import ConfigError
@attr.s(auto_attribs=True)
class CustomAccountDataConfig:
audit_room: str
sender: str
class CustomAccountDataModule:
def __init__(self, config: CustomAccountDataConfig, api: ModuleApi):
self.api = api
self.config = config
self.api.register_account_data_callbacks(
on_account_data_updated=self.log_new_account_data,
)
@staticmethod
def parse_config(config: Dict[str, Any]) -> CustomAccountDataConfig:
def check_in_config(param: str):
if param not in config:
raise ConfigError(f"'{param}' is required")
check_in_config("audit_room")
check_in_config("sender")
return CustomAccountDataConfig(
audit_room=config["audit_room"],
sender=config["sender"],
)
async def log_new_account_data(
self,
user_id: str,
room_id: Optional[str],
account_data_type: str,
content: JsonDict,
) -> None:
content_raw = json.dumps(content)
msg_content = f"{user_id} has changed their account data for type {account_data_type} to: {content_raw}"
if room_id is not None:
msg_content += f" (in room {room_id})"
await self.api.create_and_send_event_into_room(
{
"room_id": self.config.audit_room,
"sender": self.config.sender,
"type": "m.room.message",
"content": {
"msgtype": "m.text",
"body": msg_content
}
}
)
```

View File

@@ -172,7 +172,7 @@ any of the subsequent implementations of this callback.
_First introduced in Synapse v1.37.0_
```python
async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool
async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
```
Called when computing search results in the user directory. The module must return a
@@ -182,11 +182,9 @@ search results; otherwise return `False`.
The profile is represented as a dictionary with the following keys:
* `user_id: str`. The Matrix ID for this user.
* `display_name: Optional[str]`. The user's display name, or `None` if this user
has not set a display name.
* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None`
if this user has not set an avatar.
* `user_id`: The Matrix ID for this user.
* `display_name`: The user's display name.
* `avatar_url`: The `mxc://` URL to the user's avatar.
The module is given a copy of the original dictionary, so modifying it from within the
module cannot modify a user's profile when included in user directory search results.
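A minimal sketch of a module using this callback (hypothetical module; registration goes through the module API's `register_spam_checker_callbacks` method):
```python
from synapse.module_api import ModuleApi, UserProfile

class HideSuspiciousUsers:  # hypothetical example module
    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam,
        )

    async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
        # Returning True hides this user from directory search results.
        display_name = user_profile["display_name"] or ""
        return "spam" in display_name.lower()
```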

View File

@@ -247,24 +247,6 @@ admin API.
If multiple modules implement this callback, Synapse runs them all in order.
### `on_threepid_bind`
_First introduced in Synapse v1.56.0_
```python
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
```
Called after creating an association between a local user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier.
Note that this callback is _not_ called after a successful association on an _identity
server_.
If multiple modules implement this callback, Synapse runs them all in order.
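For instance, a module could audit new associations with a sketch like this (hypothetical; this callback is registered through the module API's `register_third_party_rules_callbacks` method):
```python
import logging

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)

class ThreepidAudit:  # hypothetical example module
    def __init__(self, config: dict, api: ModuleApi):
        api.register_third_party_rules_callbacks(
            on_threepid_bind=self.on_threepid_bind,
        )

    async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
        # Record every new email/msisdn association for a local user.
        logger.info("%s bound %s %s", user_id, medium, address)
```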
## Example
The example below is a module that implements the third-party rules callback

View File

@@ -33,7 +33,7 @@ A module can implement the following static method:
```python
@staticmethod
def parse_config(config: dict) -> Any
def parse_config(config: dict) -> dict
```
This method is given a dictionary resulting from parsing the YAML configuration for the
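A hedged sketch of an implementation (hypothetical required key; `ConfigError` comes from `synapse.module_api.errors`):
```python
from typing import Any

from synapse.module_api.errors import ConfigError

class MyModule:  # hypothetical example module
    @staticmethod
    def parse_config(config: dict) -> Any:
        # Validate the dict parsed from the module's YAML config; the return
        # value is later passed to the module's __init__ as its config.
        if "audit_room" not in config:  # hypothetical required key
            raise ConfigError("'audit_room' is required")
        return config
```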

View File

@@ -225,8 +225,6 @@ oidc_providers:
3. Create an application for synapse in Authentik and link it to the provider.
4. Note the slug of your application, Client ID and Client Secret.
Note: RSA keys must be used for signing for Authentik; ECC keys do not work.
Synapse config:
```yaml
oidc_providers:
@@ -242,7 +240,7 @@ oidc_providers:
- "email"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
localpart_template: "{{ user.preferred_username }}}"
display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
```

View File

@@ -234,13 +234,12 @@ host all all ::1/128 ident
### Fixing incorrect `COLLATE` or `CTYPE`
Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.
If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
The safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database,
however extreme care must be taken to avoid database corruption.
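Before attempting either fix, you can check what a live database currently uses with a quick query (a sketch using `psycopg2`, with hypothetical connection details):
```python
import psycopg2

# Adjust connection details for your setup.
conn = psycopg2.connect(dbname="synapse", user="synapse_user", host="localhost")
with conn.cursor() as cur:
    cur.execute(
        "SELECT datcollate, datctype FROM pg_database"
        " WHERE datname = current_database()"
    )
    collate, ctype = cur.fetchone()
print(f"COLLATE={collate!r} CTYPE={ctype!r}")  # Synapse expects 'C' for both
conn.close()
```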

View File

@@ -35,8 +35,3 @@ See [the TCP replication documentation](tcp_replication.md).
There are read-only versions of the synapse storage layer in
`synapse/replication/slave/storage` that use the response of the
replication API to invalidate their caches.
### The TCP Replication Module
Information about how the tcp replication module is structured, including how
the classes interact, can be found in
`synapse/replication/tcp/__init__.py`

View File

@@ -182,7 +182,7 @@ matrix.example.com {
```
frontend https
bind *:443,[::]:443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]
@@ -195,7 +195,7 @@ frontend https
use_backend matrix if matrix-host matrix-path
frontend matrix-federation
bind *:8448,[::]:8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]
@@ -206,28 +206,6 @@ backend matrix
server matrix 127.0.0.1:8008
```
[Delegation](delegate.md) example:
```
frontend https
acl matrix-well-known-client-path path /.well-known/matrix/client
acl matrix-well-known-server-path path /.well-known/matrix/server
use_backend matrix-well-known-client if matrix-well-known-client-path
use_backend matrix-well-known-server if matrix-well-known-server-path
backend matrix-well-known-client
http-after-response set-header Access-Control-Allow-Origin "*"
http-after-response set-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
http-after-response set-header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
http-request return status 200 content-type application/json string '{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}'
backend matrix-well-known-server
http-after-response set-header Access-Control-Allow-Origin "*"
http-after-response set-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
http-after-response set-header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
http-request return status 200 content-type application/json string '{"m.server":"matrix.example.com:443"}'
```
### Relayd
```

View File

@@ -407,11 +407,6 @@ manhole_settings:
# sign up in a short space of time never to return after their initial
# session.
#
# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
# applies a different trial number if the user was registered by an appservice.
# A value of 0 means no trial days are applied. Appservices not listed in this
# dictionary use the value of `mau_trial_days` instead.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
@@ -422,8 +417,6 @@ manhole_settings:
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
#mau_appservice_trial_days:
# "appservice-id": 1
# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
@@ -546,15 +539,6 @@ templates:
#
#custom_template_directory: /path/to/custom/templates/
# List of rooms to exclude from sync responses. This is useful for server
# administrators wishing to group users into a room without these users being able
# to see it from their client.
#
# By default, no room is excluded.
#
#exclude_rooms_from_sync:
# - !foo:example.com
# Message retention policy at the server level.
#
@@ -716,11 +700,11 @@ retention:
#
#allow_profile_lookup_over_federation: false
# Uncomment to allow device display name lookup over federation. By default, the
# Federation API prevents other homeservers from obtaining the display names of
# user devices on this homeserver. Defaults to 'false'.
# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: true
#allow_device_name_lookup_over_federation: false
## Caching ##
@@ -799,12 +783,6 @@ caches:
# 'txn_limit' gives the maximum number of transactions to run per connection
# before reconnecting. Defaults to 0, which means no limit.
#
# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to
# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended)
# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information
# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here:
# https://wiki.postgresql.org/wiki/Locale_data_changes
#
# 'args' gives options which are passed through to the database engine,
# except for options starting 'cp_', which are used to configure the Twisted
# connection pool. For a reference to valid arguments, see:
@@ -1234,18 +1212,10 @@ oembed:
# Registration can be rate-limited using the parameters in the "Ratelimiting"
# section of this file.
# Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration,
# you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
# without any verification, you must also set `enable_registration_without_verification`, found below.
# Enable registration for new users.
#
#enable_registration: false
# Enable registration without email or captcha verification. Note: this option is *not* recommended,
# as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
# unless `enable_registration` is also enabled.
#
#enable_registration_without_verification: true
# Time that a user's session remains valid for, after they log in.
#
# Note that this is not currently compatible with guest logins.
@@ -1330,12 +1300,6 @@ oembed:
#
#registration_requires_token: true
# Allow users to submit a token during registration to bypass any required 3pid
# steps configured in `registrations_require_3pid`.
# Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow.
#
#enable_registration_token_3pid_bypass: false
# If set, allows registration of standard or admin accounts by anyone who
# has the shared secret, even if registration is otherwise disabled.
#

View File

@@ -62,6 +62,13 @@ loggers:
# information such as access tokens.
level: INFO
twisted:
# We send the twisted logging directly to the file handler,
# to work around https://github.com/matrix-org/synapse/issues/3471
# when using "buffer" logger. Use "console" to log to stderr instead.
handlers: [file]
propagate: false
root:
level: INFO

View File

@@ -78,3 +78,82 @@ loggers:
The above logging config will set Synapse to the 'INFO' logging level by default,
with the SQL layer at 'WARNING', and will log JSON formatted messages to a
remote endpoint at 10.1.2.3:9999.
## Upgrading from legacy structured logging configuration
Versions of Synapse prior to v1.54.0 automatically converted the legacy
structured logging configuration, which was deprecated in v1.23.0, to the standard
library logging configuration.
The following reference can be used to update your configuration. Based on the
drain `type`, we can pick a new handler:
1. For a type of `console`, `console_json`, or `console_json_terse`: a handler
with a class of `logging.StreamHandler` and a `stream` of `ext://sys.stdout`
or `ext://sys.stderr` should be used.
2. For a type of `file` or `file_json`: a handler of `logging.FileHandler` with
a location of the file path should be used.
3. For a type of `network_json_terse`: a handler of `synapse.logging.RemoteHandler`
with the host and port should be used.
Then based on the drain `type` we can pick a new formatter:
1. For a type of `console` or `file` no formatter is necessary.
2. For a type of `console_json` or `file_json`: a formatter of
`synapse.logging.JsonFormatter` should be used.
3. For a type of `console_json_terse` or `network_json_terse`: a formatter of
`synapse.logging.TerseJsonFormatter` should be used.
For each new handler and formatter they should be added to the logging configuration
and then assigned to either a logger or the root logger.
An example legacy configuration:
```yaml
structured: true
loggers:
synapse:
level: INFO
synapse.storage.SQL:
level: WARNING
drains:
console:
type: console
location: stdout
file:
type: file_json
location: homeserver.log
```
Would be converted into a new configuration:
```yaml
version: 1
formatters:
json:
class: synapse.logging.JsonFormatter
handlers:
console:
class: logging.StreamHandler
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: json
filename: homeserver.log
loggers:
synapse:
level: INFO
handlers: [console, file]
synapse.storage.SQL:
level: WARNING
```
The new logging configuration is a bit more verbose, but significantly more
flexible. It allows for configurations that were not previously possible, such as
sending plain logs over the network, or using different handlers for different
modules.

View File

@@ -10,15 +10,15 @@ See the folder [system](https://github.com/matrix-org/synapse/tree/develop/docs/
for the systemd unit files.
The folder [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
contains an example configuration for the `generic_worker` worker.
contains an example configuration for the `federation_reader` worker.
## Synapse configuration files
See [the worker documentation](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly.
Below is a sample `generic_worker` worker configuration file.
Below is a sample `federation_reader` worker configuration file.
```yaml
{{#include workers/generic_worker.yaml}}
{{#include workers/federation_reader.yaml}}
```
Systemd manages daemonization itself, so ensure that none of the configuration
@@ -61,9 +61,9 @@ systemctl stop matrix-synapse.target
# Restart the master alone
systemctl start matrix-synapse.service
# Restart a specific worker (eg. generic_worker); the master is
# Restart a specific worker (eg. federation_reader); the master is
# unaffected by this.
systemctl restart matrix-synapse-worker@generic_worker.service
systemctl restart matrix-synapse-worker@federation_reader.service
# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service

View File

@@ -1,8 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml

View File

@@ -1,23 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
# Enable listener if this stream writer handles endpoints for the `typing` or
# `to_device` streams. Uses a different port to the `replication` listener to
# avoid exposing the `replication` listener publicly.
#
#- type: http
# port: 8035
# resources:
# - names: [client]
worker_log_config: /etc/matrix-synapse/event-persister-log.yaml

View File

@@ -0,0 +1,13 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8011
resources:
- names: [federation]
worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml

View File

@@ -1,14 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8083
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml

View File

@@ -302,14 +302,14 @@ Here are a few things to try:
(Understanding the output is beyond the scope of this document!)
* You can test your Matrix homeserver TURN setup with <https://test.voip.librepush.net/>.
* You can test your Matrix homeserver TURN setup with https://test.voip.librepush.net/.
Note that this test is not fully reliable yet, so don't be discouraged if
the test fails.
[Here](https://github.com/matrix-org/voip-tester) is the github repo of the
source of the tester, where you can file bug reports.
* There is a WebRTC test tool at
<https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/>. To
https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/. To
use it, you will need a username/password for your TURN server. You can
either:

View File

@@ -19,36 +19,32 @@ this document.
packages](setup/installation.md#prebuilt-packages), you will need to follow the
normal process for upgrading those packages.
- If Synapse was installed using pip then upgrade to the latest
version by running:
```bash
pip install --upgrade matrix-synapse
```
- If Synapse was installed from source, then:
1. Obtain the latest version of the source code. Git users can run
`git pull` to do this.
2. If you're running Synapse in a virtualenv, make sure to activate it before
upgrading. For example, if Synapse is installed in a virtualenv in `~/synapse/env` then
1. Activate the virtualenv before upgrading. For example, if
Synapse is installed in a virtualenv in `~/synapse/env` then
run:
```bash
source ~/synapse/env/bin/activate
```
2. If Synapse was installed using pip then upgrade to the latest
version by running:
```bash
pip install --upgrade matrix-synapse
```
If Synapse was installed using git then upgrade to the latest
version by running:
```bash
git pull
pip install --upgrade .
```
Include any relevant extras between square brackets, e.g. `pip install --upgrade ".[postgres,oidc]"`.
3. If you're using `poetry` to manage a Synapse installation, run:
```bash
poetry install
```
Include any relevant extras with `--extras`, e.g. `poetry install --extras postgres --extras oidc`.
It's probably easiest to run `poetry install --extras all`.
4. Restart Synapse:
3. Restart Synapse:
```bash
synctl restart
@@ -89,117 +85,8 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.59.0
## Device name lookup over federation has been disabled by default
The names of user devices are no longer visible to users on other homeservers by default.
Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption.
To re-enable this functionality, set the
[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation)
homeserver config option to `true`.
## Deprecation of the `synapse.app.appservice` and `synapse.app.user_dir` worker application types
The `synapse.app.appservice` worker application type allowed you to configure a
single worker to notify application services of new events, as long
as this functionality was disabled on the main process with `notify_appservices: False`.
Further, the `synapse.app.user_dir` worker application type allowed you to configure
a single worker to be responsible for updating the user directory, as long as this
was disabled on the main process with `update_user_directory: False`.
To unify Synapse's worker types, the `synapse.app.appservice` worker application
type and the `notify_appservices` configuration option have been deprecated.
The `synapse.app.user_dir` worker application type and `update_user_directory`
configuration option have also been deprecated.
To get the same functionality as was provided by the deprecated options, it's now recommended that the `synapse.app.generic_worker`
worker application type is used and that the `notify_appservices_from_worker` and/or
`update_user_directory_from_worker` options are set to the name of a worker.
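For example, the shared configuration might gain the following (the worker names
here are illustrative, and must match each worker's `worker_name`):
```yaml
notify_appservices_from_worker: appservice_worker1
update_user_directory_from_worker: user_dir_worker1
```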
For the time being, the old options can be used alongside the new options to make
it easier to transition between the two configurations, however please note that:
- the options must not contradict each other (otherwise Synapse won't start); and
- the `notify_appservices` and `update_user_directory` options will be removed in a future release of Synapse.
Please see the [*Notifying Application Services*][v1_59_notify_ases_from] and
[*Updating the User Directory*][v1_59_update_user_dir] sections of the worker
documentation for more information.
[v1_59_notify_ases_from]: workers.md#notifying-application-services
[v1_59_update_user_dir]: workers.md#updating-the-user-directory
# Upgrading to v1.58.0
## Groups/communities feature has been disabled by default
The non-standard groups/communities feature in Synapse has been disabled by default
and will be removed in Synapse v1.61.0.
# Upgrading to v1.57.0
## Changes to database schema for application services
Synapse v1.57.0 includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the
way transaction IDs are managed for application services. If your deployment uses a dedicated
worker for application service traffic, **it must be stopped** when the database is upgraded
(which normally happens when the main process is upgraded), to ensure the change is made safely
without any risk of reusing transaction IDs.
Deployments which do not use separate worker processes can be upgraded as normal. Similarly,
deployments where no application services are in use can be upgraded as normal.
<details>
<summary><b>Recovering from an incorrect upgrade</b></summary>
If the database schema is upgraded *without* stopping the worker responsible
for AS traffic, then the following error may be given when attempting to start
a Synapse worker or master process:
```
**********************************************************************************
Error during initialisation:
Postgres sequence 'application_services_txn_id_seq' is inconsistent with associated
table 'application_services_txns'. This can happen if Synapse has been downgraded and
then upgraded again, or due to a bad migration.
To fix this error, shut down Synapse (including any and all workers)
and run the following SQL:
SELECT setval('application_services_txn_id_seq', (
SELECT GREATEST(MAX(txn_id), 0) FROM application_services_txns
));
See docs/postgres.md for more information.
There may be more information in the logs.
**********************************************************************************
```
This error may also be seen if Synapse is *downgraded* to an earlier version,
and then upgraded again to v1.57.0 or later.
In either case:
1. Ensure that the worker responsible for AS traffic is stopped.
2. Run the SQL command given in the error message via `psql`.
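A minimal sketch of step 2, assuming the database is named `synapse`:
```bash
psql -d synapse -c "SELECT setval('application_services_txn_id_seq', (
    SELECT GREATEST(MAX(txn_id), 0) FROM application_services_txns
));"
```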
Synapse should then start correctly.
</details>
# Upgrading to v1.56.0
## Open registration without verification is now disabled by default
Synapse will refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config
flag `enable_registration_without_verification` is set to "true".
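For example, to keep open registration with no verification (not recommended),
the homeserver config would need both flags:
```yaml
enable_registration: true
enable_registration_without_verification: true
```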
## Groups/communities feature has been deprecated
The non-standard groups/communities feature in Synapse has been deprecated and will
@@ -212,13 +99,6 @@ experimental_features:
groups_enabled: false
```
## Change in behaviour for PostgreSQL databases with unsafe locale
Synapse now refuses to start when using PostgreSQL with non-`C` values for `COLLATE` and
`CTYPE` unless the config flag `allow_unsafe_locale`, found in the database section of
the configuration file, is set to `true`. See the [PostgreSQL documentation](https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype)
for more information and instructions on how to fix a database with incorrect values.
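A sketch of where the flag lives, assuming a typical `psycopg2` database block
(the connection `args` below are illustrative):
```yaml
database:
  name: psycopg2
  allow_unsafe_locale: true
  args:
    user: synapse_user
    database: synapse
    host: localhost
```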
# Upgrading to v1.55.0
## `synctl` script has been moved
@@ -227,15 +107,15 @@ The `synctl` script
[has been made](https://github.com/matrix-org/synapse/pull/12140) an
[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
and no longer exists at the root of Synapse's source tree. If you wish to use
`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g.
`synctl start` instead of `./synctl start` or `/path/to/synctl start`.
You will need to ensure `synctl` is on your `PATH`.
- This is automatically the case when using
[Debian packages](https://packages.matrix.org/debian/) or
[docker images](https://hub.docker.com/r/matrixdotorg/synapse)
provided by Matrix.org.
- When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
- When installing from a wheel, sdist, or PyPI, a `synctl` executable is added
to your Python installation's `bin`. This should be on your `PATH`
automatically, though you might need to activate a virtual environment
depending on how you installed Synapse.
@@ -255,7 +135,7 @@ please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
This release removes support for the `structured: true` logging configuration
which was deprecated in Synapse v1.23.0. If your logging configuration contains
`structured: true` then it should be modified based on the
[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).
[structured logging documentation](structured_logging.md).
# Upgrading to v1.53.0
@@ -271,8 +151,8 @@ the `/_matrix/client/` path.
## Stabilisation of MSC3231
The unstable validity-check endpoint for the
[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
The unstable validity-check endpoint for the
[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
feature has been stabilised and moved from:
`/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity`
@@ -286,9 +166,9 @@ Please update any relevant reverse proxy or firewall configurations appropriatel
## Time-based cache expiry is now enabled by default
Formerly, entries in the cache were not evicted regardless of whether they were accessed after storing.
This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed.
To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and
`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config.
This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed.
To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and
`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config.
The `expiry_time` flag will still continue to work, but it has been deprecated and will be removed in the future.
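For example, to keep the new behaviour but extend the eviction window (the
values here are illustrative):
```yaml
caches:
  expire_caches: true
  cache_entry_ttl: 1h
```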
## Deprecation of `capability` `org.matrix.msc3283.*`
@@ -872,7 +752,7 @@ lock down external access to the Admin API endpoints.
This release deprecates use of the `structured: true` logging
configuration for structured logging. If your logging configuration
contains `structured: true` then it should be modified based on the
[structured logging documentation](https://matrix-org.github.io/synapse/v1.56/structured_logging.html#upgrading-from-legacy-structured-logging-configuration).
[structured logging documentation](structured_logging.md).
The `structured` and `drains` logging options are now deprecated and
should be replaced by standard logging configuration of `handlers` and

View File

@@ -28,7 +28,7 @@ See the following for how to decode the dense data available from the default lo
| NNNN | Total time waiting for response to DB queries across all parallel DB work from this request |
| OOOO | Count of DB transactions performed |
| PPPP | Response body size |
| QQQQ | Response status code<br/>Suffixed with `!` if the socket was closed before the response was generated.<br/>A `499!` status code indicates that Synapse also cancelled request processing after the socket was closed.<br/> |
| QQQQ | Response status code (prefixed with ! if the socket was closed before the response was generated) |
| RRRR | Request |
| SSSS | User-agent |
| TTTT | Events fetched from DB to service this request (note that this does not include events fetched from the cache) |

View File

@@ -1,10 +1,7 @@
## Some useful SQL queries for Synapse Admins
## Size of full matrix db
```sql
SELECT pg_size_pretty( pg_database_size( 'matrix' ) );
```
`SELECT pg_size_pretty( pg_database_size( 'matrix' ) );`
### Result example:
```
pg_size_pretty
@@ -12,19 +9,39 @@ pg_size_pretty
6420 MB
(1 row)
```
## Show top 20 largest rooms by state events count
```sql
SELECT r.name, s.room_id, s.current_state_events
FROM room_stats_current s
LEFT JOIN room_stats_state r USING (room_id)
ORDER BY current_state_events DESC
LIMIT 20;
```
and by state_group_events count:
```sql
SELECT rss.name, s.room_id, count(s.room_id) FROM state_groups_state s
LEFT JOIN room_stats_state rss USING (room_id)
GROUP BY s.room_id, rss.name
ORDER BY count(s.room_id) DESC
LIMIT 20;
```
The same, but with the join removed for performance reasons:
```sql
SELECT s.room_id, count(s.room_id) FROM state_groups_state s
GROUP BY s.room_id
ORDER BY count(s.room_id) DESC
LIMIT 20;
```
## Show top 20 largest tables by row count
```sql
SELECT relname, n_live_tup AS "rows"
FROM pg_stat_user_tables
SELECT relname, n_live_tup as rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC
LIMIT 20;
```
This query is quick, but may be very approximate; for the exact number of rows use:
```sql
SELECT COUNT(*) FROM <table_name>;
```
This query is quick, but may be very approximate; for the exact number of rows use `SELECT COUNT(*) FROM <table_name>`.
### Result example:
```
state_groups_state - 161687170
@@ -49,19 +66,46 @@ device_lists_stream - 326903
user_directory_search - 316433
```
## Show top 20 rooms by new events count in the last day:
```sql
SELECT e.room_id, r.name, COUNT(e.event_id) cnt FROM events e
LEFT JOIN room_stats_state r USING (room_id)
WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000 GROUP BY e.room_id, r.name ORDER BY cnt DESC LIMIT 20;
```
## Show top 20 users on the homeserver by sent events (messages) in the last month:
```sql
SELECT user_id, SUM(total_events)
FROM user_stats_historical
WHERE TO_TIMESTAMP(end_ts/1000) AT TIME ZONE 'UTC' > date_trunc('day', now() - interval '1 month')
GROUP BY user_id
ORDER BY SUM(total_events) DESC
LIMIT 20;
```
## Show the last 100 messages from a given user, with room names:
```sql
SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json FROM events e
LEFT JOIN event_json j USING (event_id)
LEFT JOIN room_stats_state r USING (room_id)
WHERE sender = '@LOGIN:example.com'
AND e.type = 'm.room.message'
ORDER BY stream_ordering DESC
LIMIT 100;
```
## Show top 20 largest tables by storage size
```sql
SELECT nspname || '.' || relname AS "relation",
pg_size_pretty(pg_total_relation_size(c.oid)) AS "total_size"
FROM pg_class c
LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND c.relkind <> 'i'
AND C.relkind <> 'i'
AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(c.oid) DESC
ORDER BY pg_total_relation_size(C.oid) DESC
LIMIT 20;
```
### Result example:
```
public.state_groups_state - 27 GB
@@ -86,93 +130,8 @@ public.device_lists_remote_cache - 124 MB
public.state_group_edges - 122 MB
```
## Show top 20 largest rooms by state events count
You get the same information when you use the
[admin API](../../admin_api/rooms.md#list-room-api)
and set the parameter `order_by=state_events`.
```sql
SELECT r.name, s.room_id, s.current_state_events
FROM room_stats_current s
LEFT JOIN room_stats_state r USING (room_id)
ORDER BY current_state_events DESC
LIMIT 20;
```
and by state_group_events count:
```sql
SELECT rss.name, s.room_id, COUNT(s.room_id)
FROM state_groups_state s
LEFT JOIN room_stats_state rss USING (room_id)
GROUP BY s.room_id, rss.name
ORDER BY COUNT(s.room_id) DESC
LIMIT 20;
```
The same, but with the join removed for performance reasons:
```sql
SELECT s.room_id, COUNT(s.room_id)
FROM state_groups_state s
GROUP BY s.room_id
ORDER BY COUNT(s.room_id) DESC
LIMIT 20;
```
## Show top 20 rooms by new events count in the last day:
```sql
SELECT e.room_id, r.name, COUNT(e.event_id) cnt
FROM events e
LEFT JOIN room_stats_state r USING (room_id)
WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000
GROUP BY e.room_id, r.name
ORDER BY cnt DESC
LIMIT 20;
```
## Show top 20 users on the homeserver by sent events (messages) in the last month:
Caution: this query does not use any indexes, so it can be slow and may create load on the database.
```sql
SELECT COUNT(*), sender
FROM events
WHERE (type = 'm.room.encrypted' OR type = 'm.room.message')
AND origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 month') * 1000
GROUP BY sender
ORDER BY COUNT(*) DESC
LIMIT 20;
```
## Show the last 100 messages from a given user, with room names:
```sql
SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json
FROM events e
LEFT JOIN event_json j USING (event_id)
LEFT JOIN room_stats_state r USING (room_id)
WHERE sender = '@LOGIN:example.com'
AND e.type = 'm.room.message'
ORDER BY stream_ordering DESC
LIMIT 100;
```
## Show rooms with names, sorted by events in these rooms
**Sort and order with bash**
```bash
echo "SELECT event_json.room_id, room_stats_state.name FROM event_json, room_stats_state \
WHERE room_stats_state.room_id = event_json.room_id" | psql -d synapse -h localhost -U synapse_user -t \
| sort | uniq -c | sort -n
```
Documentation for `psql` command line parameters: https://www.postgresql.org/docs/current/app-psql.html
**Sort and order with SQL**
```sql
SELECT COUNT(*), event_json.room_id, room_stats_state.name
FROM event_json, room_stats_state
WHERE room_stats_state.room_id = event_json.room_id
GROUP BY event_json.room_id, room_stats_state.name
ORDER BY COUNT(*) DESC
LIMIT 50;
```
`echo "select event_json.room_id,room_stats_state.name from event_json,room_stats_state where room_stats_state.room_id=event_json.room_id" | psql synapse | sort | uniq -c | sort -n`
### Result example:
```
9459 !FPUfgzXYWTKgIrwKxW:matrix.org | This Week in Matrix
@@ -186,22 +145,12 @@ SELECT COUNT(*), event_json.room_id, room_stats_state.name
```
## Lookup room state info by list of room_id
You get the same information when you use the
[admin API](../../admin_api/rooms.md#room-details-api).
```sql
SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
rsc.joined_members, rsc.local_users_in_room, rss.join_rules
FROM room_stats_state rss
LEFT JOIN room_stats_current rsc USING (room_id)
WHERE room_id IN (
'!OGEhHVWSdvArJzumhm:matrix.org',
'!YTvKGNlinIzlkMTVRl:matrix.org'
);
```
## Show users and devices that have not been online for a while
```sql
SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_seen"
FROM devices
WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
```
SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules
FROM room_stats_state rss
LEFT JOIN room_stats_current rsc USING (room_id)
WHERE room_id IN (
'!OGEhHVWSdvArJzumhm:matrix.org',
'!YTvKGNlinIzlkMTVRl:matrix.org'
)
```

File diff suppressed because it is too large

View File

@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.
In addition to these modifications, we have added a version picker to the documentation.
Users can switch between documentations for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.
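Such extra assets are wired into mdbook via `book.toml`; a minimal sketch (the
paths here are illustrative and depend on where the files live in the repository):
```toml
[output.html]
additional-css = ["docs/website_files/version-picker.css"]
additional-js = ["docs/website_files/version-picker.js"]
```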
More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and

View File

@@ -75,20 +75,6 @@ function setTocEntry() {
* Populate sidebar on load
*/
window.addEventListener('load', () => {
// Prevent rendering the table of contents of the "print book" page, as it
// will end up being rendered into the output (in a broken-looking way)
// Get the name of the current page (i.e. 'print.html')
const pageNameExtension = window.location.pathname.split('/').pop();
// Split off the extension (as '.../print' is also a valid page name), which
// should result in 'print'
const pageName = pageNameExtension.split('.')[0];
if (pageName === "print") {
// Don't render the table of contents on this page
return;
}
// Only create table of contents if there is more than one header on the page
if (headers.length <= 1) {
return;

View File

@@ -131,6 +131,18 @@
<i class="fa fa-search"></i>
</button>
{{/if}}
<div class="version-picker">
<div class="dropdown">
<div class="select">
<span></span>
<i class="fa fa-chevron-down"></i>
</div>
<input type="hidden" name="version">
<ul class="dropdown-menu">
<!-- Versions will be added dynamically in version-picker.js -->
</ul>
</div>
</div>
</div>
<h1 class="menu-title">{{ book_title }}</h1>
@@ -309,4 +321,4 @@
{{/if}}
</body>
</html>
</html>

View File

@@ -0,0 +1,78 @@
.version-picker {
display: flex;
align-items: center;
}
.version-picker .dropdown {
width: 130px;
max-height: 29px;
margin-left: 10px;
display: inline-block;
border-radius: 4px;
border: 1px solid var(--theme-popup-border);
position: relative;
font-size: 13px;
color: var(--fg);
height: 100%;
text-align: left;
}
.version-picker .dropdown .select {
cursor: pointer;
display: block;
padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
font-size: 10px;
color: var(--fg);
cursor: pointer;
float: right;
line-height: 20px !important;
}
.version-picker .dropdown:hover {
border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
border: 1px solid var(--theme-popup-border);
border-radius: 2px 2px 0 0;
background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
position: absolute;
background-color: var(--theme-popup-bg);
width: 100%;
left: -1px;
right: 1px;
margin-top: 1px;
border: 1px solid var(--theme-popup-border);
border-radius: 0 0 4px 4px;
overflow: hidden;
display: none;
max-height: 300px;
overflow-y: auto;
z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
font-size: 12px;
padding: 6px 20px;
cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
padding: 0;
list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
display: inline-block;
content: "✓";
margin-inline-start: -14px;
width: 14px;
}

View File

@@ -0,0 +1,127 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');
fetchVersions(dropdown, dropdownMenu).then(() => {
initializeVersionDropdown(dropdown, dropdownMenu);
});
/**
* Initialize the dropdown functionality for version selection.
*
* @param {Element} dropdown - The dropdown element.
* @param {Element} dropdownMenu - The dropdown menu element.
*/
function initializeVersionDropdown(dropdown, dropdownMenu) {
// Toggle the dropdown menu on click
dropdown.addEventListener('click', function () {
this.setAttribute('tabindex', 1);
this.classList.toggle('active');
dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
});
// Remove the 'active' class and hide the dropdown menu on focusout
dropdown.addEventListener('focusout', function () {
this.classList.remove('active');
dropdownMenu.style.display = 'none';
});
// Handle item selection within the dropdown menu
const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
dropdownMenuItems.forEach(function (item) {
item.addEventListener('click', function () {
dropdownMenuItems.forEach(function (item) {
item.classList.remove('active');
});
this.classList.add('active');
dropdown.querySelector('span').textContent = this.textContent;
dropdown.querySelector('input').value = this.getAttribute('id');
window.location.href = changeVersion(window.location.href, this.textContent);
});
});
};
/**
* This function fetches the available versions from a GitHub repository
* and inserts them into the version picker.
*
* @param {Element} dropdown - The dropdown element.
* @param {Element} dropdownMenu - The dropdown menu element.
* @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
*/
function fetchVersions(dropdown, dropdownMenu) {
return new Promise((resolve, reject) => {
window.addEventListener("load", () => {
fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
cache: "force-cache",
}).then(res =>
res.json()
).then(resObject => {
const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
const versions = tree.map(item => item.path).sort(sortVersions);
// Create a list of <li> items for versions
versions.forEach((version) => {
const li = document.createElement("li");
li.textContent = version;
li.id = version;
if (window.SYNAPSE_VERSION === version) {
li.classList.add('active');
dropdown.querySelector('span').textContent = version;
dropdown.querySelector('input').value = version;
}
dropdownMenu.appendChild(li);
});
resolve(versions);
}).catch(ex => {
console.error("Failed to fetch version data", ex);
reject(ex);
})
});
});
}
/**
* Custom sorting function to sort an array of version strings.
*
* @param {string} a - The first version string to compare.
* @param {string} b - The second version string to compare.
* @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
*/
function sortVersions(a, b) {
// Put 'develop' and 'latest' at the top
if (a === 'develop' || a === 'latest') return -1;
if (b === 'develop' || b === 'latest') return 1;
const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];
return versionB.localeCompare(versionA);
}
/**
* Change the version in a URL path.
*
* @param {string} url - The original URL to be modified.
* @param {string} newVersion - The new version to replace the existing version in the URL.
* @returns {string} The updated URL with the new version.
*/
function changeVersion(url, newVersion) {
const parsedURL = new URL(url);
const pathSegments = parsedURL.pathname.split('/');
// Modify the version
pathSegments[2] = newVersion;
// Reconstruct the URL
parsedURL.pathname = pathSegments.join('/');
return parsedURL.href;
}

View File

@@ -0,0 +1 @@
window.SYNAPSE_VERSION = 'v1.55';

View File

@@ -27,7 +27,7 @@ feeds streams of newly written data between processes so they can be kept in
sync with the database state.
When configured to do so, Synapse uses a
[Redis pub/sub channel](https://redis.io/docs/manual/pubsub/) to send the replication
[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
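A minimal sketch of enabling this in the shared configuration (the host and
port shown are the Redis defaults):
```yaml
redis:
  enabled: true
  host: localhost
  port: 6379
```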
@@ -138,7 +138,22 @@ as the `listeners` option in the shared config.
For example:
```yaml
{{#include systemd-with-workers/workers/generic_worker.yaml}}
worker_app: synapse.app.generic_worker
worker_name: worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8083
resources:
- names:
- client
- federation
worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
```
...is a full configuration for a generic worker instance, which will expose a
@@ -170,8 +185,8 @@ worker: refer to the [stream writers](#stream-writers) section below for further
information.
# Sync requests
^/_matrix/client/(r0|v3)/sync$
^/_matrix/client/(api/v1|r0|v3)/events$
^/_matrix/client/(v2_alpha|r0|v3)/sync$
^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$
^/_matrix/client/(api/v1|r0|v3)/initialSync$
^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
@@ -185,9 +200,13 @@ information.
^/_matrix/federation/v1/query/
^/_matrix/federation/v1/make_join/
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/(v1|v2)/send_join/
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/send_join/
^/_matrix/federation/v2/send_join/
^/_matrix/federation/v1/send_leave/
^/_matrix/federation/v2/send_leave/
^/_matrix/federation/v1/invite/
^/_matrix/federation/v2/invite/
^/_matrix/federation/v1/query_auth/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
@@ -255,8 +274,6 @@ information.
Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/federation/v1/groups/
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
^/_matrix/client/(r0|v3|unstable)/groups/
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
@@ -328,9 +345,9 @@ effects of bursts of events from that bridge on events sent by normal users.
#### Stream writers
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
(This is only supported with Redis-based replication.)
Additionally, there is *experimental* support for moving writing of specific
streams (such as events) off of the main process to a particular worker. (This
is only supported with Redis-based replication.)
To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
@@ -350,12 +367,6 @@ stream_writers:
events: event_persister1
```
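The named instance must also appear in the shared `instance_map` so that other
processes can reach its replication listener; a sketch reusing the port from the
`event_persister` example above:
```yaml
instance_map:
  event_persister1:
    host: localhost
    port: 8034
```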
An example for a stream writer instance:
```yaml
{{#include systemd-with-workers/workers/event_persister.yaml}}
```
Some of the streams have associated endpoints which, for maximum efficiency, should
be routed to the workers handling that stream. See below for the currently supported
streams and the endpoints associated with them:
@@ -386,23 +397,23 @@ the stream writer for the `typing` stream:
The following endpoints should be routed directly to the worker configured as
the stream writer for the `to_device` stream:
^/_matrix/client/(r0|v3|unstable)/sendToDevice/
^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/
##### The `account_data` stream
The following endpoints should be routed directly to the worker configured as
the stream writer for the `account_data` stream:
^/_matrix/client/(r0|v3|unstable)/.*/tags
^/_matrix/client/(r0|v3|unstable)/.*/account_data
^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags
^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data
##### The `receipts` stream
The following endpoints should be routed directly to the worker configured as
the stream writer for the `receipts` stream:
^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers
##### The `presence` stream
@@ -413,7 +424,7 @@ the stream writer for the `presence` stream:
#### Background tasks
There is also support for moving background tasks to a separate
There is also *experimental* support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled).
@@ -426,49 +437,9 @@ the shared configuration would include:
run_background_tasks_on: background_worker
```
You might also wish to investigate the `update_user_directory_from_worker` and
You might also wish to investigate the `update_user_directory` and
`media_instance_running_background_jobs` settings.
An example for a dedicated background worker instance:
```yaml
{{#include systemd-with-workers/workers/background_worker.yaml}}
```
#### Updating the User Directory
You can designate one generic worker to update the user directory.
Specify its name in the shared configuration as follows:
```yaml
update_user_directory_from_worker: worker_name
```
This work cannot be load-balanced; please ensure the main process is restarted
after setting this option in the shared configuration!
This style of configuration supersedes the legacy `synapse.app.user_dir`
worker application type.
#### Notifying Application Services
You can designate one generic worker to send output traffic to Application Services.
Specify its name in the shared configuration as follows:
```yaml
notify_appservices_from_worker: worker_name
```
This work cannot be load-balanced; please ensure the main process is restarted
after setting this option in the shared configuration!
This style of configuration supersedes the legacy `synapse.app.appservice`
worker application type.
### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
@@ -487,9 +458,6 @@ pusher_instances:
### `synapse.app.appservice`
**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
`notify_appservices_from_worker` option instead.](#notifying-application-services)
Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set `notify_appservices: False` in the
shared configuration file to stop the main synapse sending appservice notifications.
@@ -557,34 +525,22 @@ Note that if a reverse proxy is used, then `/_matrix/media/` must be routed for
### `synapse.app.user_dir`
**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
`update_user_directory_from_worker` option instead.](#updating-the-user-directory)
Handles searches in the user directory. It can handle REST endpoints matching
the following regular expressions:
^/_matrix/client/(r0|v3|unstable)/user_directory/search$
^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$
When using this worker you must also set `update_user_directory: false` in the
When using this worker you must also set `update_user_directory: False` in the
shared configuration file to stop the main synapse running background
jobs related to updating the user directory.
The above endpoint is not *required* to be routed to this worker. By default,
`update_user_directory` is set to `true`, which means the main process
will handle updates. All workers configured with `client` can handle the above
endpoint as long as either this worker or the main process is configured to
handle it, and is online.
If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.
### `synapse.app.frontend_proxy`
Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions:
^/_matrix/client/(r0|v3|unstable)/keys/upload
^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload
If `use_presence` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions:
@@ -654,14 +610,14 @@ The following shows an example setup using Redis and a reverse proxy:
| Main | | Generic | | Generic | | Event |
| Process | | Worker 1 | | Worker 2 | | Persister |
+--------------+ +--------------+ +--------------+ +--------------+
^ ^ | ^ | | ^ | | ^ ^
| | | | | | | | | | |
| | | | | HTTP | | | | | |
| +----------+<--|---|---------+<--|---|---------+ | |
| | +-------------|-->+-------------+ |
| | | |
| | | |
v v v v
======================================================================
^ ^ | ^ | | ^ | ^ ^
| | | | | | | | | |
| | | | | HTTP | | | | |
| +----------+<--|---|---------+ | | | |
| | +-------------|-->+----------+ |
| | | |
| | | |
v v v v
====================================================================
Redis pub/sub channel
```

mypy.ini
View File

@@ -7,13 +7,12 @@ show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
files =
docker/,
scripts-dev/,
setup.py,
synapse/,
tests/
@@ -24,12 +23,33 @@ files =
# https://docs.python.org/3/library/re.html#re.X
exclude = (?x)
^(
|scripts-dev/build_debian_packages.py
|scripts-dev/check_signature.py
|scripts-dev/definitions.py
|scripts-dev/federation_client.py
|scripts-dev/hash_history.py
|scripts-dev/list_url_patterns.py
|scripts-dev/release.py
|scripts-dev/tail-synapse.py
|synapse/_scripts/export_signing_key.py
|synapse/_scripts/move_remote_media_to_new_store.py
|synapse/_scripts/synapse_port_db.py
|synapse/_scripts/update_synapse_database.py
|synapse/storage/databases/__init__.py
|synapse/storage/databases/main/__init__.py
|synapse/storage/databases/main/cache.py
|synapse/storage/databases/main/devices.py
|synapse/storage/databases/main/event_federation.py
|synapse/storage/databases/main/group_server.py
|synapse/storage/databases/main/metrics.py
|synapse/storage/databases/main/monthly_active_users.py
|synapse/storage/databases/main/push_rule.py
|synapse/storage/databases/main/receipts.py
|synapse/storage/databases/main/roommember.py
|synapse/storage/databases/main/search.py
|synapse/storage/databases/main/state.py
|synapse/storage/schema/
|tests/api/test_auth.py
@@ -46,6 +66,14 @@ exclude = (?x)
|tests/federation/test_federation_server.py
|tests/federation/transport/test_knocking.py
|tests/federation/transport/test_server.py
|tests/handlers/test_cas.py
|tests/handlers/test_directory.py
|tests/handlers/test_e2e_keys.py
|tests/handlers/test_federation.py
|tests/handlers/test_oidc.py
|tests/handlers/test_presence.py
|tests/handlers/test_profile.py
|tests/handlers/test_saml.py
|tests/handlers/test_typing.py
|tests/http/federation/test_matrix_federation_agent.py
|tests/http/federation/test_srv_resolver.py
@@ -57,6 +85,7 @@ exclude = (?x)
|tests/logging/test_terse_json.py
|tests/module_api/test_api.py
|tests/push/test_email.py
|tests/push/test_http.py
|tests/push/test_presentable_names.py
|tests/push/test_push_rule_evaluator.py
|tests/rest/client/test_transactions.py
@@ -65,12 +94,19 @@ exclude = (?x)
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
|tests/state/test_v2.py
|tests/storage/test_background_update.py
|tests/storage/test_base.py
|tests/storage/test_client_ips.py
|tests/storage/test_database.py
|tests/storage/test_event_federation.py
|tests/storage/test_id_generators.py
|tests/storage/test_roommember.py
|tests/test_metrics.py
|tests/test_phone_home.py
|tests/test_server.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/unittest.py
|tests/util/caches/test_cached_call.py
|tests/util/caches/test_deferred_cache.py
|tests/util/caches/test_descriptors.py
@@ -89,9 +125,6 @@ exclude = (?x)
|tests/utils.py
)$
[mypy-synapse._scripts.*]
disallow_untyped_defs = True
[mypy-synapse.api.*]
disallow_untyped_defs = True
@@ -101,7 +134,7 @@ disallow_untyped_defs = True
[mypy-synapse.appservice.*]
disallow_untyped_defs = True
[mypy-synapse.config.*]
[mypy-synapse.config._base]
disallow_untyped_defs = True
[mypy-synapse.crypto.*]
@@ -131,11 +164,6 @@ disallow_untyped_defs = True
[mypy-synapse.metrics.*]
disallow_untyped_defs = True
[mypy-synapse.metrics._reactor_metrics]
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False
[mypy-synapse.module_api.*]
disallow_untyped_defs = True
@@ -235,32 +263,66 @@ disallow_untyped_defs = True
;; The `typeshed` project maintains stubs here:
;; https://github.com/python/typeshed/tree/master/stubs
;; and for each package `foo` there's a corresponding `types-foo` package on PyPI,
;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
;; `[tool.poetry.dev-dependencies]` list.
;; which we can pull in as a dev dependency by adding to `setup.py`'s
;; `CONDITIONAL_REQUIREMENTS["mypy"]` list.
[mypy-authlib.*]
ignore_missing_imports = True
[mypy-bcrypt]
ignore_missing_imports = True
[mypy-canonicaljson]
ignore_missing_imports = True
[mypy-constantly]
ignore_missing_imports = True
[mypy-daemonize]
ignore_missing_imports = True
[mypy-h11]
ignore_missing_imports = True
[mypy-hiredis]
ignore_missing_imports = True
[mypy-hyperlink]
ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True
[mypy-jaeger_client.*]
ignore_missing_imports = True
[mypy-josepy.*]
ignore_missing_imports = True
[mypy-jwt.*]
ignore_missing_imports = True
[mypy-lxml]
ignore_missing_imports = True
[mypy-msgpack]
ignore_missing_imports = True
# Note: WIP stubs available at
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
[mypy-nacl.*]
ignore_missing_imports = True
[mypy-netaddr]
ignore_missing_imports = True
[mypy-parameterized.*]
ignore_missing_imports = True
[mypy-phonenumbers.*]
ignore_missing_imports = True
[mypy-prometheus_client.*]
ignore_missing_imports = True
[mypy-pymacaroons.*]
ignore_missing_imports = True
@@ -273,14 +335,23 @@ ignore_missing_imports = True
[mypy-saml2.*]
ignore_missing_imports = True
[mypy-sentry_sdk]
ignore_missing_imports = True
[mypy-service_identity.*]
ignore_missing_imports = True
[mypy-srvlookup.*]
[mypy-signedjson.*]
ignore_missing_imports = True
[mypy-treq.*]
ignore_missing_imports = True
[mypy-twisted.*]
ignore_missing_imports = True
[mypy-zope]
ignore_missing_imports = True
[mypy-incremental.*]
ignore_missing_imports = True

poetry.lock

File diff suppressed because it is too large

View File

@@ -36,9 +36,24 @@
[tool.black]
target-version = ['py37', 'py38', 'py39', 'py310']
# black ignores everything in .gitignore by default, see
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
# Use `extend-exclude` if you want to exclude something in addition to this.
exclude = '''
(
/(
\.eggs # exclude a few common directories in the
| \.git # root of the project
| \.tox
| \.venv
| \.env
| env
| _build
| _trial_temp.*
| build
| dist
| debian
)/
)
'''
[tool.isort]
line_length = 88
@@ -50,239 +65,4 @@ known_twisted = ["twisted", "OpenSSL"]
multi_line_output = 3
include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true
[tool.poetry]
name = "matrix-synapse"
version = "1.59.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
readme = "README.rst"
repository = "https://github.com/matrix-org/synapse"
packages = [
{ include = "synapse" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Chat",
]
include = [
{ path = "AUTHORS.rst", format = "sdist" },
{ path = "book.toml", format = "sdist" },
{ path = "changelog.d", format = "sdist" },
{ path = "CHANGES.md", format = "sdist" },
{ path = "CONTRIBUTING.md", format = "sdist" },
{ path = "demo", format = "sdist" },
{ path = "docs", format = "sdist" },
{ path = "INSTALL.md", format = "sdist" },
{ path = "mypy.ini", format = "sdist" },
{ path = "scripts-dev", format = "sdist" },
{ path = "synmark", format="sdist" },
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
]
[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"
export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.7.1"
# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
frozendict = ">=1,!=2.1.2"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
canonicaljson = ">=1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = ">=1.1.0"
PyNaCl = ">=1.2.1"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.0"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
prometheus-client = ">=0.4.0"
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see #9936
attrs = ">=19.2.0,!=21.1.0"
netaddr = ">=0.7.18"
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "~=1.1.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# Optional Dependencies
# ---------------------
matrix-synapse-ldap3 = { version = ">=0.1", optional = true }
psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true }
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.14.0", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
pyjwt = { version = ">=1.6.4", optional = true }
txredisapi = { version = ">=1.4.7", optional = true }
hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
# twice: once here, and once in the `all` extra.
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
saml2 = ["pysaml2"]
oidc = ["authlib"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url_preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["pyjwt"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache_memory = ["pympler"]
test = ["parameterized", "idna"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
# 1) for new installations, I want instructions in existing documentation and tutorials
# out there to still work.
# 2) I don't want to hard-code a list of extras into CI if I can help it. The ideal
# solution here would be something like https://github.com/python-poetry/poetry/issues/3413
# Poetry 1.2's dependency groups might make this easier. But I'm not trying that out
# until there's a stable release of 1.2.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
# matrix-synapse-ldap3
"matrix-synapse-ldap3",
# postgres
"psycopg2", "psycopg2cffi", "psycopg2cffi-compat",
# saml2
"pysaml2",
# oidc
"authlib",
# url_preview
"lxml",
# sentry
"sentry-sdk",
# opentracing
"jaeger-client", "opentracing",
# jwt
"pyjwt",
# redis
"txredisapi", "hiredis",
# cache_memory
"pympler",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
]
[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
isort = "==5.7.0"
black = "==22.3.0"
flake8-comprehensions = "*"
flake8-bugbear = "==21.3.2"
flake8 = "*"
# Typechecking
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
types-pyOpenSSL = ">=20.0.7"
types-PyYAML = ">=5.4.10"
types-requests = ">=2.26.0"
types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
# parameterized<0.7.4 can create classes with names that would normally be invalid
# identifiers. trial really does not like this when running with multiple workers.
parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
click = "==8.1.0"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
commonmark = "==0.9.1"
pygithub = "==1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

View File

@@ -17,8 +17,7 @@ import subprocess
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
from types import FrameType
from typing import Collection, Optional, Sequence, Set
from typing import Optional, Sequence
DISTS = (
"debian:buster", # oldstable: EOL 2022-08
@@ -27,7 +26,6 @@ DISTS = (
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:impish", # 21.10 (EOL 2022-07)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
)
DESC = """\
@@ -42,17 +40,15 @@ projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class Builder(object):
def __init__(
self,
redirect_stdout: bool = False,
docker_build_args: Optional[Sequence[str]] = None,
self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None
):
self.redirect_stdout = redirect_stdout
self._docker_build_args = tuple(docker_build_args or ())
self.active_containers: Set[str] = set()
self.active_containers = set()
self._lock = threading.Lock()
self._failed = False
def run_build(self, dist: str, skip_tests: bool = False) -> None:
def run_build(self, dist, skip_tests=False):
"""Build deb for a single distribution"""
if self._failed:
@@ -66,7 +62,7 @@ class Builder(object):
self._failed = True
raise
def _inner_build(self, dist: str, skip_tests: bool = False) -> None:
def _inner_build(self, dist, skip_tests=False):
tag = dist.split(":", 1)[1]
# Make the dir where the debs will live.
@@ -141,7 +137,7 @@ class Builder(object):
stdout.close()
print("Completed build of %s" % (dist,))
def kill_containers(self) -> None:
def kill_containers(self):
with self._lock:
active = list(self.active_containers)
@@ -159,10 +155,8 @@ class Builder(object):
self.active_containers.remove(c)
def run_builds(
builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False
) -> None:
def sig(signum: int, _frame: Optional[FrameType]) -> None:
def run_builds(builder, dists, jobs=1, skip_tests=False):
def sig(signum, _frame):
print("Caught SIGINT")
builder.kill_containers()

View File

@@ -19,7 +19,7 @@ if ! git diff --quiet FETCH_HEAD... -- debian; then
if git diff --quiet FETCH_HEAD... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
echo "https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#debian-changelog" >&2
echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
exit 1
fi
fi
@@ -32,7 +32,7 @@ fi
# Print a link to the contributing guide if the user makes a mistake
CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog"
https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"
# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)

View File

@@ -0,0 +1,72 @@
import argparse
import json
import logging
import sys
import dns.resolver
import urllib2
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
def get_targets(server_name):
if ":" in server_name:
target, port = server_name.split(":")
yield (target, int(port))
return
try:
answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
for srv in answers:
yield (srv.target, srv.port)
except dns.resolver.NXDOMAIN:
yield (server_name, 8448)
def get_server_keys(server_name, target, port):
url = "https://%s:%i/_matrix/key/v1" % (target, port)
keys = json.load(urllib2.urlopen(url))
verify_keys = {}
for key_id, key_base64 in keys["verify_keys"].items():
verify_key = decode_verify_key_bytes(key_id, decode_base64(key_base64))
verify_signed_json(keys, server_name, verify_key)
verify_keys[key_id] = verify_key
return verify_keys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("signature_name")
parser.add_argument(
"input_json", nargs="?", type=argparse.FileType("r"), default=sys.stdin
)
args = parser.parse_args()
logging.basicConfig()
server_name = args.signature_name
keys = {}
for target, port in get_targets(server_name):
try:
keys = get_server_keys(server_name, target, port)
print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
write_signing_keys(sys.stdout, keys.values())
break
except Exception:
logging.exception("Error talking to %s:%s", target, port)
json_to_check = json.load(args.input_json)
print("Checking JSON:")
for key_id in json_to_check["signatures"][args.signature_name]:
try:
key = keys[key_id]
verify_signed_json(json_to_check, args.signature_name, key)
print("PASS %s" % (key_id,))
except Exception:
logging.exception("Check for key %s failed" % (key_id,))
print("FAIL %s" % (key_id,))
if __name__ == "__main__":
main()
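Given the `urllib2` import this is a Python 2 script. A hypothetical invocation (the filename is not shown in this diff, so it is assumed here):

```bash
# Resolve matrix.org via SRV, fetch and verify its signing keys, then check
# the signatures on a JSON object read from stdin (script name assumed).
python2 get_server_keys.py matrix.org < signed_event.json
```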


@@ -43,8 +43,6 @@ fi
# Build the base Synapse image from the local checkout
docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
extra_test_args=()
# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then
# Build the workers docker image (from the base Synapse image).
@@ -52,25 +50,25 @@ if [[ -n "$WORKERS" ]]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
# And provide some more configuration to complement.
# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
# ... and it takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
export COMPLEMENT_CA=true
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=25
else
export COMPLEMENT_BASE_IMAGE=complement-synapse
COMPLEMENT_DOCKERFILE=Dockerfile
COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
fi
# Build the Complement image from the Synapse image we just built.
docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement"
docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"
cd "$COMPLEMENT_DIR"
EXTRA_COMPLEMENT_ARGS=""
if [[ -n "$1" ]]; then
# A test name regex has been set; supply it to Complement
EXTRA_COMPLEMENT_ARGS+="-run $1 "
fi
# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
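A sketch of how this script is typically driven, assuming a Complement checkout at `$COMPLEMENT_DIR` as the script requires; the test name is illustrative:

```bash
# Run the full suite against a worker-mode deployment.
COMPLEMENT_DIR=../complement WORKERS=1 ./scripts-dev/complement.sh

# Restrict to tests matching a regex, which the script forwards to
# Complement as a `-run` filter (test name illustrative).
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestOutboundFederation
```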

scripts-dev/definitions.py Executable file

@@ -0,0 +1,208 @@
#! /usr/bin/python
import argparse
import ast
import os
import re
import sys
import yaml
class DefinitionVisitor(ast.NodeVisitor):
def __init__(self):
super().__init__()
self.functions = {}
self.classes = {}
self.names = {}
self.attrs = set()
self.definitions = {
"def": self.functions,
"class": self.classes,
"names": self.names,
"attrs": self.attrs,
}
def visit_Name(self, node):
self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
def visit_Attribute(self, node):
self.attrs.add(node.attr)
for child in ast.iter_child_nodes(node):
self.visit(child)
def visit_ClassDef(self, node):
visitor = DefinitionVisitor()
self.classes[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def visit_FunctionDef(self, node):
visitor = DefinitionVisitor()
self.functions[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def non_empty(defs):
functions = {name: non_empty(f) for name, f in defs["def"].items()}
classes = {name: non_empty(f) for name, f in defs["class"].items()}
result = {}
if functions:
result["def"] = functions
if classes:
result["class"] = classes
names = defs["names"]
uses = []
for name in names.get("Load", ()):
if name not in names.get("Param", ()) and name not in names.get("Store", ()):
uses.append(name)
uses.extend(defs["attrs"])
if uses:
result["uses"] = uses
result["names"] = names
result["attrs"] = defs["attrs"]
return result
def definitions_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = DefinitionVisitor()
visitor.visit(input_ast)
definitions = non_empty(visitor.definitions)
return definitions
def definitions_in_file(filepath):
with open(filepath) as f:
return definitions_in_code(f.read())
def defined_names(prefix, defs, names):
for name, funcs in defs.get("def", {}).items():
names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
for name, funcs in defs.get("class", {}).items():
names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
def used_names(prefix, item, defs, names):
for name, funcs in defs.get("def", {}).items():
used_names(prefix + name + ".", name, funcs, names)
for name, funcs in defs.get("class", {}).items():
used_names(prefix + name + ".", name, funcs, names)
path = prefix.rstrip(".")
for used in defs.get("uses", ()):
if used in names:
if item:
names[item].setdefault("uses", []).append(used)
names[used].setdefault("used", {}).setdefault(item, []).append(path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find definitions.")
parser.add_argument(
"--unused", action="store_true", help="Only list unused definitions"
)
parser.add_argument(
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
)
parser.add_argument(
"--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
)
parser.add_argument(
"directories",
nargs="+",
metavar="DIR",
help="Directories to search for definitions",
)
parser.add_argument(
"--referrers",
default=0,
type=int,
help="Include referrers up to the given depth",
)
parser.add_argument(
"--referred",
default=0,
type=int,
help="Include referred down to the given depth",
)
parser.add_argument(
"--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()
definitions = {}
for directory in args.directories:
for root, _, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
definitions[filepath] = definitions_in_file(filepath)
names = {}
for filepath, defs in definitions.items():
defined_names(filepath + ":", defs, names)
for filepath, defs in definitions.items():
used_names(filepath + ":", None, defs, names)
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
result = {}
for name, definition in names.items():
if patterns and not any(pattern.match(name) for pattern in patterns):
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
if args.unused and definition.get("used"):
continue
result[name] = definition
referrer_depth = args.referrers
referrers = set()
while referrer_depth:
referrer_depth -= 1
for entry in result.values():
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
if name not in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
referred_depth = args.referred
referred = set()
while referred_depth:
referred_depth -= 1
for entry in result.values():
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
if name not in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
if args.format == "yaml":
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == "dot":
print("digraph {")
for name, entry in result.items():
print(name)
for used_by in entry.get("used", ()):
if used_by in result:
print(used_by, "->", name)
print("}")
else:
raise ValueError("Unknown format %r" % (args.format))
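A sketch of using this script; the regexes are illustrative, and note that names are keyed as `path:name` per `defined_names` above:

```bash
# List definitions under synapse/ that nothing else appears to use,
# ignoring anything matching the given pattern.
./scripts-dev/definitions.py --unused --ignore 'tests/.*' synapse

# Emit a graphviz graph of referrers for names matching a pattern.
./scripts-dev/definitions.py --pattern '.*:main' --referrers 1 --format dot synapse
```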


@@ -38,7 +38,7 @@ import argparse
import base64
import json
import sys
from typing import Any, Dict, Optional, Tuple
from typing import Any, Optional
from urllib import parse as urlparse
import requests
@@ -47,14 +47,13 @@ import signedjson.types
import srvlookup
import yaml
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool
# uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection
# HTTPConnection.debuglevel = 1
def encode_base64(input_bytes: bytes) -> str:
def encode_base64(input_bytes):
"""Encode bytes as a base64 string without any padding."""
input_len = len(input_bytes)
@@ -64,7 +63,7 @@ def encode_base64(input_bytes: bytes) -> str:
return output_string
def encode_canonical_json(value: object) -> bytes:
def encode_canonical_json(value):
return json.dumps(
value,
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
@@ -125,13 +124,8 @@ def request(
authorization_headers = []
for key, sig in signed_json["signatures"][origin_name].items():
header = 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"' % (
origin_name,
key,
sig,
destination,
)
authorization_headers.append(header)
header = 'X-Matrix origin=%s,key="%s",sig="%s"' % (origin_name, key, sig)
authorization_headers.append(header.encode("ascii"))
print("Authorization: %s" % header, file=sys.stderr)
dest = "matrix://%s%s" % (destination, path)
@@ -140,10 +134,7 @@ def request(
s = requests.Session()
s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
"Host": destination,
"Authorization": authorization_headers[0],
}
headers = {"Host": destination, "Authorization": authorization_headers[0]}
if method == "POST":
headers["Content-Type"] = "application/json"
@@ -158,7 +149,7 @@ def request(
)
def main() -> None:
def main():
parser = argparse.ArgumentParser(
description="Signs and sends a federation request to a matrix homeserver"
)
@@ -216,7 +207,6 @@ def main() -> None:
if not args.server_name or not args.signing_key:
read_args_from_config(args)
assert isinstance(args.signing_key, str)
algorithm, version, key_base64 = args.signing_key.split()
key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)
@@ -238,7 +228,7 @@ def main() -> None:
print("")
def read_args_from_config(args: argparse.Namespace) -> None:
def read_args_from_config(args):
with open(args.config, "r") as fh:
config = yaml.safe_load(fh)
@@ -255,7 +245,7 @@ def read_args_from_config(args: argparse.Namespace) -> None:
class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod
def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
def lookup(s, skip_well_known=False):
if s[-1] == "]":
# ipv6 literal (with no port)
return s, 8448
@@ -281,7 +271,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
return s, 8448
@staticmethod
def get_well_known(server_name: str) -> Optional[str]:
def get_well_known(server_name):
uri = "https://%s/.well-known/matrix/server" % (server_name,)
print("fetching %s" % (uri,), file=sys.stderr)
@@ -304,9 +294,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
return None
def get_connection(
self, url: str, proxies: Optional[Dict[str, str]] = None
) -> HTTPConnectionPool:
def get_connection(self, url, proxies=None):
parsed = urlparse.urlparse(url)
(host, port) = self.lookup(parsed.netloc)
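For context, a hypothetical signed request using this script; the flag names come from argparse options outside this hunk, so treat them as assumptions:

```bash
# Sign a federation GET as example.com and send it to matrix.org
# (flag names assumed; the path is a standard federation endpoint).
python scripts-dev/federation_client.py \
    --server-name example.com \
    --signing-key-path signing.key \
    --destination matrix.org \
    /_matrix/federation/v1/version
```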


@@ -0,0 +1,81 @@
import sqlite3
import sys
from unpaddedbase64 import decode_base64, encode_base64
from synapse.crypto.event_signing import (
add_event_pdu_content_hash,
compute_pdu_event_reference_hash,
)
from synapse.federation.units import Pdu
from synapse.storage._base import SQLBaseStore
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore
class Store:
_get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
_get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
_get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
_get_pdu_origin_signatures_txn = SignatureStore.__dict__[
"_get_pdu_origin_signatures_txn"
]
_store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
_store_pdu_reference_hash_txn = SignatureStore.__dict__[
"_store_pdu_reference_hash_txn"
]
_store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]
store = Store()
def select_pdus(cursor):
cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")
ids = cursor.fetchall()
pdu_tuples = store._get_pdu_tuples(cursor, ids)
pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]
reference_hashes = {}
for pdu in pdus:
try:
if pdu.prev_pdus:
print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
for pdu_id, origin, hashes in pdu.prev_pdus:
ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
hashes[ref_alg] = encode_base64(ref_hsh)
store._store_prev_pdu_hash_txn(
cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
)
print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
pdu = add_event_pdu_content_hash(pdu)
ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
store._store_pdu_reference_hash_txn(
cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
)
for alg, hsh_base64 in pdu.hashes.items():
print(alg, hsh_base64)
store._store_pdu_content_hash_txn(
cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
)
except Exception:
print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
def main():
conn = sqlite3.connect(sys.argv[1])
cursor = conn.cursor()
select_pdus(cursor)
conn.commit()
if __name__ == "__main__":
main()
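`main` takes the database path from `sys.argv[1]`, so an invocation would look like the sketch below (the script's filename is not shown in this diff and is assumed):

```bash
# Backfill content and reference hashes across an existing sqlite database.
python hash_history.py homeserver.db
```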


@@ -79,19 +79,13 @@ else
# If we were not asked to lint changed files, and no paths were found as a result,
# then lint everything!
if [[ -z ${files+x} ]]; then
# CI runs each linter on the entire checkout, e.g. `black .`. So don't
# rely on this list to *find* lint targets if that misses a file; instead,
# use it to exclude files from linters when this can't be done by config.
#
# To check which files the linters examine, use:
# black --verbose . 2>&1 | \grep -v ignored
# isort --show-files .
# flake8 --verbose . # This isn't a great option
# mypy has explicit config in mypy.ini; there is also mypy --verbose
# Lint all source code files and directories
# Note: this list aims to mirror the one in tox.ini
files=(
"synapse" "docker" "tests"
# annoyingly, black doesn't find these so we have to list them
"scripts-dev"
"contrib" "synmark" "stubs" ".ci"
"contrib" "setup.py" "synmark" "stubs" ".ci"
)
fi
fi
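A sketch of the two modes implied by this hunk: with no arguments the default target list above is linted, while explicit paths restrict the run (the example path is illustrative):

```bash
# Lint everything in the default target list.
./scripts-dev/lint.sh

# Lint only the given file(s).
./scripts-dev/lint.sh synapse/handlers/message.py
```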


@@ -0,0 +1,60 @@
#! /usr/bin/python
import argparse
import ast
import os
import sys
import yaml
PATTERNS_V1 = []
PATTERNS_V2 = []
RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}
class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
name = node.func.id
else:
return
if name == "client_patterns":
PATTERNS_V2.append(node.args[0].s)
def find_patterns_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = CallVisitor()
visitor.visit(input_ast)
def find_patterns_in_file(filepath):
with open(filepath) as f:
find_patterns_in_code(f.read())
parser = argparse.ArgumentParser(description="Find url patterns.")
parser.add_argument(
"directories",
nargs="+",
metavar="DIR",
help="Directories to search for definitions",
)
args = parser.parse_args()
for directory in args.directories:
for root, _, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
find_patterns_in_file(filepath)
PATTERNS_V1.sort()
PATTERNS_V2.sort()
yaml.dump(RESULT, sys.stdout, default_flow_style=False)
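Usage is a plain directory walk, for example:

```bash
# Dump every pattern passed to client_patterns() under synapse/rest as YAML.
./scripts-dev/list_url_patterns.py synapse/rest
```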


@@ -16,7 +16,7 @@
can crop up, e.g the cache descriptors.
"""
from typing import Callable, Optional, Type
from typing import Callable, Optional
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
@@ -94,7 +94,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
return signature
def plugin(version: str) -> Type[SynapsePlugin]:
def plugin(version: str):
# This is the entry point of the plugin, and lets us deal with the fact
# that the mypy plugin interface is *not* stable by looking at the version
# string.
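For context, mypy only loads a plugin like this when the config names it; a minimal sketch of a run, assuming the plugin is registered in `mypy.ini` (the path is an assumption):

```bash
# Assumes mypy.ini contains a line such as:
#   plugins = scripts-dev/mypy_synapse_plugin.py
# after which an ordinary run loads the plugin automatically.
mypy --config-file mypy.ini synapse
```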


@@ -25,20 +25,19 @@ import sys
import urllib.request
from os import path
from tempfile import TemporaryDirectory
from typing import Any, List, Optional, cast
from typing import List, Optional, Tuple
import attr
import click
import commonmark
import git
import redbaron
from click.exceptions import ClickException
from github import Github
from packaging import version
def run_until_successful(
command: str, *args: Any, **kwargs: Any
) -> subprocess.CompletedProcess:
def run_until_successful(command, *args, **kwargs):
while True:
completed_process = subprocess.run(command, *args, **kwargs)
exit_code = completed_process.returncode
@@ -52,7 +51,7 @@ def run_until_successful(
@click.group()
def cli() -> None:
def cli():
"""An interactive script to walk through the parts of creating a release.
Requires the dev dependencies be installed, which can be done via:
@@ -67,35 +66,36 @@ def cli() -> None:
./scripts-dev/release.py tag
# ... wait for assets to build ...
# ... wait for asssets to build ...
./scripts-dev/release.py publish
./scripts-dev/release.py upload
# Optional: generate some nice links for the announcement
./scripts-dev/release.py announce
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published.
"""
@cli.command()
def prepare() -> None:
def prepare():
"""Do the initial stages of creating a release, including creating release
branch, updating changelog and pushing to GitHub.
"""
# Make sure we're in a git repo.
repo = get_repo_and_check_clean_checkout()
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
raise click.ClickException("Not in Synapse repo.")
if repo.is_dirty():
raise click.ClickException("Uncommitted changes exist.")
click.secho("Updating git repo...")
repo.remote().fetch()
# Get the current version and AST from root Synapse module.
current_version = get_package_version()
current_version, parsed_synapse_ast, version_node = parse_version_from_module()
# Figure out what sort of release we're doing and calculate the new version.
rc = click.confirm("RC", default=True)
@@ -157,21 +157,22 @@ def prepare() -> None:
click.get_current_context().abort()
# Switch to the release branch.
# Cast safety: parse() won't return a version.LegacyVersion from our
# version string format.
parsed_new_version = cast(version.Version, version.parse(new_version))
parsed_new_version = version.parse(new_version)
# We assume for debian changelogs that we only do RCs or full releases.
assert not parsed_new_version.is_devrelease
assert not parsed_new_version.is_postrelease
release_branch_name = get_release_branch_name(parsed_new_version)
release_branch_name = (
f"release-v{parsed_new_version.major}.{parsed_new_version.minor}"
)
release_branch = find_ref(repo, release_branch_name)
if release_branch:
if release_branch.is_remote():
# If the release branch only exists on the remote we check it out
# locally.
repo.git.checkout(release_branch_name)
release_branch = repo.active_branch
else:
# If a branch doesn't exist we create one. We ask which branch it
# should be based off, defaulting to sensible values depending on the
@@ -193,25 +194,25 @@ def prepare() -> None:
click.get_current_context().abort()
# Check out the base branch and ensure it's up to date
repo.head.set_reference(base_branch, "check out the base branch")
repo.head.reference = base_branch
repo.head.reset(index=True, working_tree=True)
if not base_branch.is_remote():
update_branch(repo)
# Create the new release branch
# Type ignore will no longer be needed after GitPython 3.1.28.
# See https://github.com/gitpython-developers/GitPython/pull/1419
repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
release_branch = repo.create_head(release_branch_name, commit=base_branch)
# Switch to the release branch and ensure it's up to date.
# Switch to the release branch and ensure its up to date.
repo.git.checkout(release_branch_name)
update_branch(repo)
# Update the version specified in pyproject.toml.
subprocess.check_output(["poetry", "version", new_version])
# Update the `__version__` variable and write it back to the file.
version_node.value = '"' + new_version + '"'
with open("synapse/__init__.py", "w") as f:
f.write(parsed_synapse_ast.dumps())
# Generate changelogs.
generate_and_write_changelog(current_version, new_version)
generate_and_write_changelog(current_version)
# Generate debian changelogs
if parsed_new_version.pre is not None:
@@ -224,7 +225,7 @@ def prepare() -> None:
debian_version = new_version
run_until_successful(
f'dch -M -v {debian_version} "New Synapse release {new_version}."',
f'dch -M -v {debian_version} "New synapse release {debian_version}."',
shell=True,
)
run_until_successful('dch -M -r -D stable ""', shell=True)
@@ -262,43 +263,35 @@ def prepare() -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
def tag(gh_token: Optional[str]) -> None:
def tag(gh_token: Optional[str]):
"""Tags the release and generates a draft GitHub release"""
# Make sure we're in a git repo.
repo = get_repo_and_check_clean_checkout()
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
raise click.ClickException("Not in Synapse repo.")
if repo.is_dirty():
raise click.ClickException("Uncommitted changes exist.")
click.secho("Updating git repo...")
repo.remote().fetch()
# Find out the version and tag name.
current_version = get_package_version()
current_version, _, _ = parse_version_from_module()
tag_name = f"v{current_version}"
# Check we haven't released this version.
if tag_name in repo.tags:
raise click.ClickException(f"Tag {tag_name} already exists!\n")
# Check we're on the right release branch
release_branch = get_release_branch_name(current_version)
if repo.active_branch.name != release_branch:
click.echo(
f"Need to be on the release branch ({release_branch}) before tagging. "
f"Currently on ({repo.active_branch.name})."
)
click.get_current_context().abort()
# Get the appropriate changelogs and tag.
changes = get_changes_for_version(current_version)
click.echo_via_pager(changes)
if click.confirm("Edit text?", default=False):
edited_changes = click.edit(changes, require_save=False)
# This assert is for mypy's benefit. click's docs are a little unclear, but
# when `require_save=False`, not saving the temp file in the editor returns
# the original string.
assert edited_changes is not None
changes = edited_changes
changes = click.edit(changes, require_save=False)
repo.create_tag(tag_name, message=changes, sign=True)
@@ -352,16 +345,22 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
"""Publish release on GitHub."""
def publish(gh_token: str):
"""Publish release."""
# Make sure we're in a git repo.
get_repo_and_check_clean_checkout()
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
raise click.ClickException("Not in Synapse repo.")
current_version = get_package_version()
if repo.is_dirty():
raise click.ClickException("Uncommitted changes exist.")
current_version, _, _ = parse_version_from_module()
tag_name = f"v{current_version}"
if not click.confirm(f"Publish release {tag_name} on GitHub?", default=True):
if not click.confirm(f"Publish {tag_name}?", default=True):
return
# Publish the draft release
@@ -389,19 +388,12 @@ def publish(gh_token: str) -> None:
@cli.command()
def upload() -> None:
def upload():
"""Upload release to pypi."""
current_version = get_package_version()
current_version, _, _ = parse_version_from_module()
tag_name = f"v{current_version}"
# Check we have the right tag checked out.
repo = get_repo_and_check_clean_checkout()
tag = repo.tag(f"refs/tags/{tag_name}")
if repo.head.commit != tag.commit:
click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
click.get_current_context().abort()
pypi_asset_names = [
f"matrix_synapse-{current_version}-py3-none-any.whl",
f"matrix-synapse-{current_version}.tar.gz",
@@ -423,78 +415,53 @@ def upload() -> None:
)
@cli.command()
def announce() -> None:
"""Generate markdown to announce the release."""
def parse_version_from_module() -> Tuple[
version.Version, redbaron.RedBaron, redbaron.Node
]:
# Parse the AST and load the `__version__` node so that we can edit it
# later.
with open("synapse/__init__.py") as f:
red = redbaron.RedBaron(f.read())
current_version = get_package_version()
tag_name = f"v{current_version}"
version_node = None
for node in red:
if node.type != "assignment":
continue
click.echo(
f"""
Hi everyone. Synapse {current_version} has just been released.
if node.target.type != "name":
continue
[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
)
if node.target.value != "__version__":
continue
if "rc" in tag_name:
click.echo(
"""
Announce the RC in
- #homeowners:matrix.org (Synapse Announcements)
- #synapse-dev:matrix.org"""
)
else:
click.echo(
"""
Announce the release in
- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
- #synapse-dev:matrix.org
- #synapse-package-maintainers:matrix.org"""
)
version_node = node
break
if not version_node:
print("Failed to find '__version__' definition in synapse/__init__.py")
sys.exit(1)
def get_package_version() -> version.Version:
version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
"utf-8"
)
return version.Version(version_string)
# Parse the current version.
current_version = version.parse(version_node.value.value.strip('"'))
assert isinstance(current_version, version.Version)
def get_release_branch_name(version_number: version.Version) -> str:
return f"release-v{version_number.major}.{version_number.minor}"
def get_repo_and_check_clean_checkout() -> git.Repo:
"""Get the project repo and check it's not got any uncommitted changes."""
try:
repo = git.Repo()
except git.InvalidGitRepositoryError:
raise click.ClickException("Not in Synapse repo.")
if repo.is_dirty():
raise click.ClickException("Uncommitted changes exist.")
return repo
return current_version, red, version_node
def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
"""Find the branch/ref, looking first locally then in the remote."""
if ref_name in repo.references:
return repo.references[ref_name]
if ref_name in repo.refs:
return repo.refs[ref_name]
elif ref_name in repo.remote().refs:
return repo.remote().refs[ref_name]
else:
return None
def update_branch(repo: git.Repo) -> None:
def update_branch(repo: git.Repo):
"""Ensure branch is up to date if it has a remote"""
tracking_branch = repo.active_branch.tracking_branch()
if tracking_branch:
repo.git.merge(tracking_branch.name)
if repo.active_branch.tracking_branch():
repo.git.merge(repo.active_branch.tracking_branch().name)
def get_changes_for_version(wanted_version: version.Version) -> str:
@@ -558,15 +525,11 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
return "\n".join(version_changelog)
def generate_and_write_changelog(
current_version: version.Version, new_version: str
) -> None:
def generate_and_write_changelog(current_version: version.Version):
# We do this by getting a draft so that we can edit it before writing to the
# changelog.
result = run_until_successful(
f"python3 -m towncrier build --draft --version {new_version}",
shell=True,
capture_output=True,
"python3 -m towncrier --draft", shell=True, capture_output=True
)
new_changes = result.stdout.decode("utf-8")
new_changes = new_changes.replace(
@@ -582,8 +545,8 @@ def generate_and_write_changelog(
f.write(existing_content)
# Remove all the news fragments
for filename in glob.iglob("changelog.d/*.*"):
os.remove(filename)
for f in glob.iglob("changelog.d/*.*"):
os.remove(f)
if __name__ == "__main__":


@@ -27,7 +27,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.util import json_encoder
def main() -> None:
def main():
parser = argparse.ArgumentParser(
description="""Adds a signature to a JSON object.


@@ -0,0 +1,67 @@
import collections
import json
import sys
import time
import requests
Entry = collections.namedtuple("Entry", "name position rows")
ROW_TYPES = {}
def row_type_for_columns(name, column_names):
column_names = tuple(column_names)
row_type = ROW_TYPES.get((name, column_names))
if row_type is None:
row_type = collections.namedtuple(name, column_names)
ROW_TYPES[(name, column_names)] = row_type
return row_type
def parse_response(content):
streams = json.loads(content)
result = {}
for name, value in streams.items():
row_type = row_type_for_columns(name, value["field_names"])
position = value["position"]
rows = [row_type(*row) for row in value["rows"]]
result[name] = Entry(name, position, rows)
return result
def replicate(server, streams):
return parse_response(
requests.get(
server + "/_synapse/replication", verify=False, params=streams
).content
)
def main():
server = sys.argv[1]
streams = None
while not streams:
try:
streams = {
row.name: row.position
for row in replicate(server, {"streams": "-1"})["streams"].rows
}
except requests.exceptions.ConnectionError:
time.sleep(0.1)
while True:
try:
results = replicate(server, streams)
except Exception:
sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
break
for update in results.values():
for row in update.rows:
sys.stdout.write(repr(row) + "\n")
streams[update.name] = update.position
if __name__ == "__main__":
main()
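The script polls `<server>/_synapse/replication` with `verify=False`, so self-signed TLS is tolerated; a hypothetical invocation (URL illustrative):

```bash
# Follow replication updates from a local homeserver and print each row.
python tail-synapse.py https://localhost:8448
```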

scripts-dev/test_postgresql.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# This script builds the Docker image to run the PostgreSQL tests, and then runs
# the tests. It uses a dedicated tox environment so that we don't have to
# rebuild it each time.
# Command line arguments to this script are forwarded to "tox" and then to "trial".
set -e
# Build, and tag
docker build docker/ \
--build-arg "UID=$(id -u)" \
--build-arg "GID=$(id -g)" \
-f docker/Dockerfile-pgtests \
-t synapsepgtests
# Run, mounting the current directory into /src
docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
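As the header comment says, arguments are forwarded through tox to trial, so a single module can be targeted (module name illustrative):

```bash
# Run just one test module inside the PostgreSQL test container.
./scripts-dev/test_postgresql.sh tests.storage.test_base
```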

Some files were not shown because too many files have changed in this diff.