
Compare commits


1 commit

Author: Action Bot
SHA1: a5d7d6b40d
Message: Version picker added for v1.67 docs
Date: 2023-12-11 14:52:10 +00:00
772 changed files with 18772 additions and 49322 deletions

View File

@@ -1,141 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3-
# compatible wheel; if so, rename the wheel before repairing it.
import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version
def check_is_abi3_compatible(wheel_file: str) -> None:
"""Check the contents of the built wheel for any `.so` files that are *not*
abi3 compatible.
"""
with ZipFile(wheel_file, "r") as wheel:
for file in wheel.namelist():
if not file.endswith(".so"):
continue
if not file.endswith(".abi3.so"):
raise Exception(f"Found non-abi3 lib: {file}")
def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
"""Replaces the cpython wheel file with a ABI3 compatible wheel"""
if tag.abi == "abi3":
# Nothing to do.
return wheel_file
check_is_abi3_compatible(wheel_file)
# HACK: it seems that some older versions of pip will consider a wheel marked
# as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
# here; there are some clues in
# https://github.com/pantsbuild/pants/pull/12857
# https://github.com/pypa/pip/issues/9138
# https://github.com/pypa/packaging/pull/319
# Empirically this seems to work, note that macOS 11 and 10.16 are the same,
# both versions are valid for backwards compatibility.
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
dirname,
f"{name}-{version}-{abi3_tag}.whl",
)
os.rename(wheel_file, new_wheel_file)
print("Renamed wheel to", new_wheel_file)
return new_wheel_file
def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
"""Entry point"""
# Parse the wheel file name into its parts. Note that `parse_wheel_filename`
# normalizes the package name (i.e. it converts matrix_synapse ->
# matrix-synapse), which is not what we want.
_, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
name = os.path.basename(wheel_file).split("-")[0]
if len(tags) != 1:
# We expect the wheel file to have only a single tag
raise Exception(f"Unexpectedly found multiple tags: {tags}")
tag = next(iter(tags))
if build:
# We don't use build tags in Synapse
raise Exception(f"Unexpected build tag: {build}")
# If the wheel is for cpython then convert it into an abi3 wheel.
if tag.interpreter.startswith("cp"):
wheel_file = cpython(wheel_file, name, version, tag)
# Finally, repair the wheel.
if archs is not None:
# If we are given archs then we are on macos and need to use
# `delocate-listdeps`.
subprocess.run(["delocate-listdeps", wheel_file], check=True)
subprocess.run(
["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
check=True,
)
else:
subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
parser.add_argument(
"--wheel-dir",
"-w",
metavar="WHEEL_DIR",
help="Directory to store delocated wheels",
required=True,
)
parser.add_argument(
"--require-archs",
metavar="archs",
default=None,
)
parser.add_argument(
"wheel_file",
metavar="WHEEL_FILE",
)
args = parser.parse_args()
wheel_file = args.wheel_file
wheel_dir = args.wheel_dir
archs = args.require_archs
main(wheel_file, wheel_dir, archs)
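The comment in main() above explains why the script re-derives the distribution name from the filename rather than taking it from `parse_wheel_filename`, which canonicalises it. A minimal sketch of that behaviour, using a made-up wheel filename rather than one taken from this diff:

from packaging.utils import parse_wheel_filename

# parse_wheel_filename normalises the project name: underscores become hyphens.
name, version, build, tags = parse_wheel_filename(
    "matrix_synapse-1.67.0-cp37-abi3-manylinux_2_17_x86_64.whl"
)
print(name)     # prints "matrix-synapse", not the original "matrix_synapse"
print(version)  # 1.67.0
print(tags)     # frozenset with the single cp37-abi3-manylinux_2_17_x86_64 tag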

View File

@@ -18,13 +18,6 @@
import json
import os
def set_output(key: str, value: str):
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
with open(os.environ["GITHUB_OUTPUT"], "at") as f:
print(f"{key}={value}", file=f)
IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
@@ -46,7 +39,7 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.8", "3.9", "3.10", "3.11")
for version in ("3.8", "3.9", "3.10")
)
@@ -54,7 +47,7 @@ trial_postgres_tests = [
{
"python-version": "3.7",
"database": "postgres",
"postgres-version": "11",
"postgres-version": "10",
"extras": "all",
}
]
@@ -62,9 +55,9 @@ trial_postgres_tests = [
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.11",
"python-version": "3.10",
"database": "postgres",
"postgres-version": "15",
"postgres-version": "14",
"extras": "all",
}
)
@@ -88,7 +81,7 @@ print("::endgroup::")
test_matrix = json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)
print(f"::set-output name=trial_test_matrix::{test_matrix}")
# First calculate the various sytest jobs.
@@ -132,4 +125,4 @@ print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")
test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)
print(f"::set-output name=sytest_test_matrix::{test_matrix}")

View File

@@ -1,23 +0,0 @@
#! /usr/bin/env python
import sys
if sys.version_info < (3, 11):
raise RuntimeError("Requires at least Python 3.11, to import tomllib")
import tomllib
with open("poetry.lock", "rb") as f:
lockfile = tomllib.load(f)
try:
lock_version = lockfile["metadata"]["lock-version"]
assert lock_version == "2.0"
except Exception:
print(
"""\
Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
https://matrix-org.github.io/synapse/develop/development/dependencies.html
"""
)
raise

.ci/scripts/postgres_exec.py (new executable file, 31 lines)
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
cur.execute(c)
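As the comment notes, this is a stand-in for psql: each command-line argument is executed as one SQL statement, with autocommit enabled, against the default "postgres" database. That is how the CI scripts later in this diff call it, e.g. `poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"`. A hypothetical equivalent invocation from Python, purely to illustrate the interface:

import subprocess

# Each positional argument becomes one SQL statement.
subprocess.run(
    [
        ".ci/scripts/postgres_exec.py",
        "DROP DATABASE synapse",
        "CREATE DATABASE synapse",
    ],
    check=True,
)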

View File

@@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template

View File

@@ -23,9 +23,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-dat
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir_r="/tmp/export_data/rooms"
dir_u="/tmp/export_data/user_data"
if [ -d "$dir_r" ] && [ -d "$dir_u" ]; then
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
@@ -33,7 +32,7 @@ else
fi
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
@@ -44,9 +43,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-d
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir_r2="/tmp/export_data2/rooms"
dir_u2="/tmp/export_data2/user_data"
if [ -d "$dir_r2" ] && [ -d "$dir_u2" ]; then
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."

View File

@@ -5,8 +5,18 @@
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
@@ -23,6 +33,12 @@ export VIRTUALENV_NO_DOWNLOAD=1
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
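The sed expressions above turn the dependency bounds in pyproject.toml into exact pins: `~=` and `>=` become `==`, and caret bounds are rewritten on every line except the `python = "^..."` constraint, which must stay a lower bound. A rough Python illustration of the same substitutions on made-up dependency lines (hypothetical input, not taken from the repository):

import re

line = 'attrs = ">=19.2.0"'          # hypothetical lower-bounded dependency
print(re.sub(r"[~>]=", "==", line))  # attrs = "==19.2.0"

line = 'frozendict = "^2.3.3"'       # hypothetical caret bound
print(line.replace("^", "=="))       # frozendict = "==2.3.3"

# python = "^3.7" is deliberately left untouched, because pinning the
# interpreter to exactly 3.7 would break the olddeps job (see #12343).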
@@ -39,7 +55,7 @@ sed -i \
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
@@ -53,8 +69,8 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.3.2
poetry lock
pipx install poetry==1.1.14
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
@@ -62,3 +78,6 @@ echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests

View File

@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe -o pipefail
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,23 +45,9 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff

View File

@@ -4,15 +4,8 @@
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py
rust/target
synapse/*.so
**/__pycache__

View File

@@ -4,7 +4,7 @@
root = true
# 4 space indentation
[*.{py,pyi}]
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88

.flake8 (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501

View File

@@ -21,8 +21,4 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
# Update black to 23.1.0 (#15103)
9bb2eac71962970d02842bca441f4bcdbbf93a11
c4268e3da64f1abb5b31deaeb5769adb6510c0a7

View File

@@ -74,36 +74,6 @@ body:
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- I don't know
validations:
required: true
- type: input
id: database
attributes:
label: Database
description: |
Are you using SQLite or PostgreSQL? What's the version of your database?
If PostgreSQL, please also answer the following:
- are you using a single PostgreSQL server
or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
- have you previously ported from SQLite using the Synapse "portdb" script?
- have you previously restored from a backup?
validations:
required: true
- type: dropdown
id: workers
attributes:
label: Workers
description: |
Are you running a single Synapse process, or are you running
[2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
options:
- Single process
- Multiple workers
- I don't know
validations:
required: true
- type: textarea
id: platform
attributes:
@@ -113,28 +83,17 @@ body:
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: config
attributes:
label: Configuration
description: |
Do you have any unusual config options turned on? If so, please provide details.
- Experimental or undocumented features
- [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
- [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
- [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks (`\``).
This will be automatically formatted into code, so there is no need for backticks.
Please be careful to remove any personal or private data.
**Bug reports are usually impossible to diagnose without logging.**
**Bug reports are usually very difficult to diagnose without logging.**
render: shell
validations:
required: true

View File

@@ -1,23 +0,0 @@
version: 2
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "cargo"
directory: "/"
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"

View File

@@ -1,49 +0,0 @@
name: Write changelog for dependabot PR
on:
pull_request:
types:
- opened
- reopened # For debugging!
permissions:
# Needed to be able to push the commit. See
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
# for a similar example
contents: write
jobs:
add-changelog:
runs-on: 'ubuntu-latest'
if: ${{ github.actor == 'dependabot[bot]' }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
- name: Write, commit and push changelog
env:
PR_TITLE: ${{ github.event.pull_request.title }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc
git add changelog.d
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "GitHub Actions"
git commit -m "Changelog"
git push
shell: bash
# The `git push` above does not trigger CI on the dependabot PR.
#
# By default, workflows can't trigger other workflows when they're just using the
# default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
# recursive workflow loops by accident, because that'll get very expensive very
# quickly.) Instead, you have to manually call out to another workflow, or else
# make your changes (i.e. the `git push` above) using a personal access token.
# See
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
#
# I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
# See git commit history for previous attempts. If anyone desperately wants to try
# again in the future, make a matrix-bot account and use its access token to git push.
# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.

View File

@@ -17,19 +17,19 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,15 +48,10 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v4
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
# arm64 builds OOM without the git fetch setting. c.f.
# https://github.com/rust-lang/cargo/issues/10583
build-args: |
CARGO_NET_GIT_FETCH_WITH_CLI=true

View File

@@ -1,34 +0,0 @@
name: Deploy documentation PR preview
on:
workflow_run:
workflows: [ "Prepare documentation PR preview" ]
types:
- completed
jobs:
netlify:
if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
runs-on: ubuntu-latest
steps:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
name: book
path: book
- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@v1
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}
branch: ${{ github.event.workflow_run.head_branch }}
revision: ${{ github.event.workflow_run.head_sha }}
token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
site_id: ${{ secrets.NETLIFY_SITE_ID }}
desc: Documentation preview
deployment_env: PR Documentation Preview

View File

@@ -1,60 +0,0 @@
name: Prepare documentation PR preview
on:
pull_request:
paths:
- docs/**
- book.toml
- .github/workflows/docs-pr.yaml
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
uses: actions/upload-artifact@v3
with:
name: book
path: book
# We'll only use this in a workflow_run, then we're done with it
retention-days: 1
link-check:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Setup htmltest
run: |
wget https://github.com/wjdp/htmltest/releases/download/v0.17.0/htmltest_0.17.0_linux_amd64.tar.gz
echo '775c597ee74899d6002cd2d93076f897f4ba68686bceabe2e5d72e84c57bc0fb htmltest_0.17.0_linux_amd64.tar.gz' | sha256sum -c
tar zxf htmltest_0.17.0_linux_amd64.tar.gz
- name: Test links with htmltest
# Build the book with `./` as the site URL (to make checks on 404.html possible)
# Then run htmltest (without checking external links since that involves the network and is slow).
run: |
MDBOOK_OUTPUT__HTML__SITE_URL="./" mdbook build
./htmltest book --skip-external

View File

@@ -17,10 +17,10 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.17'
@@ -54,11 +54,11 @@ jobs:
esac
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
echo "::set-output name=branch-version::$branch"
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book

View File

@@ -5,7 +5,7 @@
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
@@ -25,19 +25,13 @@ jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -58,14 +52,7 @@ jobs:
postgres-version: "14"
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
@@ -74,7 +61,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v4
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -82,12 +69,6 @@ jobs:
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -131,14 +112,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
@@ -152,7 +126,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -179,8 +153,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
@@ -198,19 +172,19 @@ jobs:
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
# TODO: should mypy be included here? It feels more brittle than the others.
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md

View File

@@ -1,24 +0,0 @@
on:
push:
branches: ["develop", "release-*"]
paths:
- poetry.lock
pull_request:
paths:
- poetry.lock
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check-sdists:
name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- run: pip install tomli
- run: ./scripts-dev/check_locked_deps_have_sdists.py
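The workflow above (removed in this comparison) runs scripts-dev/check_locked_deps_have_sdists.py whenever poetry.lock changes. That script is not part of this diff; purely as an illustration of the idea, such a check might parse the lockfile with tomli and flag any locked package whose recorded artifacts are all wheels (a guess at the approach, not the actual script):

import tomli

with open("poetry.lock", "rb") as f:
    lockfile = tomli.load(f)

for package in lockfile["package"]:
    # In a version-2.0 lockfile each package lists its artifacts under "files".
    files = [entry["file"] for entry in package.get("files", [])]
    if files and not any(name.endswith(".tar.gz") for name in files):
        print(f"{package['name']} is locked without an sdist")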

View File

@@ -1,74 +0,0 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead
name: Store complement-synapse image in ghcr.io
on:
push:
branches: [ "master" ]
schedule:
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
branch:
required: true
default: 'develop'
type: choice
options:
- develop
- master
# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
name: Build and push complement image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout specific branch (debug build)
uses: actions/checkout@v3
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
uses: actions/checkout@v3
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
uses: actions/checkout@v3
if: github.event_name == 'push'
with:
ref: master
- name: Login to registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |
type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
type=sha,format=long
- name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
run: scripts-dev/complement.sh --build-only
- name: Tag and push generated image
run: |
for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
echo "tag and push $TAG"
docker tag complement-synapse $TAG
docker push $TAG
done

View File

@@ -11,12 +11,11 @@ on:
# we do the full build on tags.
tags: ["v*"]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
@@ -25,10 +24,8 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -36,7 +33,7 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "distros=$dists" >> "$GITHUB_OUTPUT"
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
@@ -52,18 +49,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -71,9 +68,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: '3.x'
uses: actions/setup-python@v2
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
@@ -89,96 +84,14 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: debs
path: debs/*
build-wheels:
name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-11]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-11"
# Don't build aarch64 wheels on mac.
- os: "macos-11"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
arch: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Build aarch64 wheels
if: matrix.arch == 'aarch64'
run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp3*-* *i686* *musl*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
name: Build sdist
runs-on: ubuntu-latest
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- run: pip install build
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -186,12 +99,11 @@ jobs:
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v2
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release

View File

@@ -4,85 +4,50 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
steps:
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
with:
filters: |
rust:
- 'rust/**'
- 'Cargo.toml'
- 'Cargo.lock'
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
check-lockfile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
@@ -91,90 +56,19 @@ jobs:
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy -- -D warnings
# We also lint against a nightly rustc so that we can lint the benchmark
# suite, which requires a nightly compiler.
lint-clippy-nightly:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --all-features -- -D warnings
lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- check-lockfile
- lint-clippy
- lint-rustfmt
needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta]
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -184,10 +78,8 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
@@ -203,42 +95,27 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
# 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
# 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
run: |
docker run -d -p 5432:5432 \
--tmpfs /var/lib/postgres:rw,size=6144m \
--mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
poetry-version: "1.3.2"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=6 tests
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: /var/run/postgresql
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
@@ -258,57 +135,16 @@ jobs:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
poetry-version: "1.3.2"
extras: "all test"
- run: poetry run trial -j6 tests
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -334,13 +170,12 @@ jobs:
extras: ["all"]
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
@@ -378,19 +213,9 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
@@ -398,7 +223,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -428,32 +253,27 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "11"
postgres-version: "10"
- python-version: "3.11"
postgres-version: "15"
- python-version: "3.10"
postgres-version: "14"
services:
postgres:
@@ -470,38 +290,13 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v3
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
# behind new releases, so we have to use the PostgreSQL apt repository.
# Steps taken from https://www.postgresql.org/download/linux/ubuntu/
run: |
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
@@ -522,88 +317,34 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
name: Run Complement Tests
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
# We want to ensure that the cargo benchmarks still compile, which requires a
# nightly compiler.
cargo-bench:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2
- run: cargo bench --no-run
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- check-sampleconfig
- lint
- lint-crlf
- lint-newsfile
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
- cargo-test
- cargo-bench
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -611,8 +352,5 @@ jobs:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non-PR builds
# Cargo test is skipped if there are no changes to Rust code
skippable: |
skippable:
lint-newsfile
cargo-test
cargo-bench

View File

@@ -5,11 +5,24 @@ on:
types: [ opened ]
jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}
secrets:
github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
add_new_issues:
name: Add new issues to the triage board
runs-on: ubuntu-latest
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation add_to_project($projectid:ID!,$contentid:ID!) {
addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}

View File

@@ -11,34 +11,34 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: actions/add-to-project@main
id: add_project
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"
github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
- name: Set status
env:
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
run: |
gh api graphql -f query='
mutation(
$project: ID!
$item: ID!
$fieldid: ID!
$columnid: String!
) {
updateProjectV2ItemFieldValue(
input: {
projectId: $project
itemId: $item
fieldId: $fieldid
value: {
singleSelectOptionId: $columnid
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation {
updateProjectV2ItemFieldValue(
input: {
projectId: $projectid
itemId: $contentid
fieldId: $fieldid
value: {
singleSelectOptionId: "Todo"
}
}
) {
projectV2Item {
id
}
) {
projectV2Item {
id
}
}
}
}' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
fieldid: ${{ env.FIELD_ID }}
optionid: ${{ env.OPTION_ID }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
OPTION_ID: "ba22e43c"

View File

@@ -15,14 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -39,15 +32,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -79,14 +65,7 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
@@ -109,7 +88,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -135,8 +114,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
@@ -148,11 +127,12 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.3.2
pipx install poetry==1.1.14
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
@@ -173,8 +153,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

.gitignore, vendored (14 changed lines)
View File

@@ -15,9 +15,8 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry and cargo lockfile.
# We do want the poetry lockfile.
!poetry.lock
!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -36,7 +35,6 @@ __pycache__/
# For direnv users
/.envrc
.direnv/
# IDEs
/.idea/
@@ -62,13 +60,3 @@ book/
# complement
/complement-*
/master.tar.gz
# rust
/target/
/synapse/*.so
# Poetry will create a setup.py, which we don't want to include.
/setup.py
# Don't include users' poetry configs
/poetry.toml

View File

@@ -1 +0,0 @@
group_imports = "StdExternalCrate"

CHANGES.md (1217 changed lines)

File diff suppressed because it is too large.

Cargo.lock, generated (466 changed lines)
View File

@@ -1,466 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
dependencies = [
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "proc-macro2"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-log"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
dependencies = [
"arc-swap",
"log",
"pyo3",
]
[[package]]
name = "pyo3-macros"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
"pyo3",
"serde",
]
[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
"hex",
"lazy_static",
"log",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
]
[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"


@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).
[workspace]
members = ["rust"]


@@ -34,6 +34,14 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"


@@ -1,23 +0,0 @@
# A build script for poetry that adds the rust extension.
import os
from typing import Any, Dict
from setuptools_rust import Binding, RustExtension
def build(setup_kwargs: Dict[str, Any]) -> None:
original_project_dir = os.path.dirname(os.path.realpath(__file__))
cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
extension = RustExtension(
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
# We always force building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False


@@ -1 +0,0 @@
Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on.


@@ -1 +0,0 @@
Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response.


@@ -1 +0,0 @@
Refactor writing json data in `FileExfiltrationWriter`.


@@ -1 +0,0 @@
Bump black from 22.12.0 to 23.1.0.


@@ -1 +0,0 @@
Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data).


@@ -1 +0,0 @@
Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider.


@@ -1 +0,0 @@
Tighten the login ratelimit defaults.


@@ -1 +0,0 @@
Correct reference to `federation_verify_certificates` in configuration documentation.


@@ -1 +0,0 @@
Add a new `send_federation_http_request` method to the Module API to allow Synapse modules to make matrix federation requests over HTTP.


@@ -1,28 +0,0 @@
# Schema symlinks
This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc.
In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against).
## Example
![](datagrip-aware-of-schema.png)
## Caveats
- Doesn't include temporary tables created ad-hoc by Synapse.
- Postgres only. IDEs will likely be confused by SQLite-specific queries.
- Will not include migrations created after the latest schema dump.
- Symlinks might confuse checkouts on Windows systems.
## Instructions
### Jetbrains IDEs with DataGrip plugin
- View -> Tool Windows -> Database
- `+` Icon -> DDL Data Source
- Pick a name, e.g. `Synapse schema dump`
- Under sources, click `+`.
- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL.
- OK, and OK.
- IDE should now be aware of the schema.
- Try control-clicking on a table name in a bit of SQL e.g. in `_get_forgotten_rooms_for_user_txn`.


@@ -1 +0,0 @@
../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres

Binary file not shown.

@@ -1 +0,0 @@
../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres


@@ -1 +0,0 @@
../../synapse/storage/schema/common/schema_version.sql


@@ -1 +0,0 @@
../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres


@@ -68,7 +68,6 @@ redis:
enabled: true
host: redis
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
```
@@ -95,6 +94,20 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Add Workers to `instance_map`
Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
```yaml
instance_map:
synapse-generic-worker-1: # The worker_name setting in your worker configuration file
host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
port: 8034 # The port assigned to the replication listener in your worker config file
synapse-federation-sender-1:
host: synapse-federation-sender-1
port: 8034
```
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
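The concrete values fall outside the hunks shown below; as a rough, hedged sketch (assuming the standard `send_federation` and `federation_sender_instances` option names, and reusing the illustrative worker name from the example above), the settings would look something like:

```yaml
# Sketch only - hand federation sending off to the dedicated worker.
# The worker name matches the illustrative one used earlier in this guide.
send_federation: false
federation_sender_instances:
  - synapse-federation-sender-1
```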
@@ -109,4 +122,4 @@ federation_sender_instances:
## Other Worker types
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
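To make the pattern concrete, here is a minimal sketch of one more worker configuration following the same conventions as the examples above (the worker name, port, and log path are illustrative, not part of the original guide); it would also need a matching entry in `instance_map`:

```yaml
# Hypothetical second generic worker, mirroring synapse-generic-worker-1.
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-2
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
  - type: http
    port: 8035            # replication listener, referenced from instance_map
    resources:
      - names: [replication]
worker_log_config: /data/generic_worker_2.log.config
```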


@@ -5,4 +5,10 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
worker_log_config: /data/federation_sender.log.config


@@ -6,6 +6,10 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8034
resources:
- names: [replication]
- type: http
port: 8081
x_forwarded: true

File diff suppressed because it is too large

@@ -1,47 +0,0 @@
# `lnav` config for Synapse logs
[lnav](https://lnav.org/) is a log-viewing tool. It is particularly useful when
you need to interleave multiple log files, or for exploring a large log file
with regex filters. The downside is that it is not as ubiquitous as tools like
`less`, `grep`, etc.
This directory contains an `lnav` [log format definition](
https://docs.lnav.org/en/v0.10.1/formats.html#defining-a-new-format
) for Synapse logs as
emitted by Synapse with the default [logging configuration](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#log_config
). It supports lnav 0.10.1 because that's what's packaged by my distribution.
This should allow lnav:
- to interpret timestamps, allowing log interleaving;
- to interpret log severity levels, allowing colouring by log level(!!!);
- to interpret request IDs, allowing you to skip through a specific request; and
- to highlight room, event and user IDs in logs.
See also https://gist.github.com/benje/e2ab750b0a81d11920d83af637d289f7 for a
similar example.
## Example
[![asciicast](https://asciinema.org/a/556133.svg)](https://asciinema.org/a/556133)
## Tips
- `lnav -i /path/to/synapse/checkout/contrib/lnav/synapse-log-format.json`
- `lnav my_synapse_log_file` or `lnav synapse_log_files.*`, etc.
- `lnav --help` for CLI help.
Within lnav itself:
- `?` for help within lnav itself.
- `q` to quit.
- `/` to search a-la `less` and `vim`, then `n` and `N` to continue searching
down and up.
- Use `o` and `O` to skip through logs based on the request ID (`POST-1234`, or
else the value of the [`request_id_header`](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=request_id_header#listeners
) header). This may get confused if the same request ID is repeated among
multiple files or process restarts.
- ???
- Profit


@@ -1,67 +0,0 @@
{
"$schema": "https://lnav.org/schemas/format-v1.schema.json",
"synapse": {
"title": "Synapse logs",
"description": "Logs output by Synapse, a Matrix homesever, under its default logging config.",
"regex": {
"log": {
"pattern": ".*(?<timestamp>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) - (?<logger>.+) - (?<lineno>\\d+) - (?<level>\\w+) - (?<context>.+) - (?<body>.*)"
}
},
"json": false,
"timestamp-field": "timestamp",
"timestamp-format": [
"%Y-%m-%d %H:%M:%S,%L"
],
"level-field": "level",
"body-field": "body",
"opid-field": "context",
"level": {
"critical": "CRITICAL",
"error": "ERROR",
"warning": "WARNING",
"info": "INFO",
"debug": "DEBUG"
},
"sample": [
{
"line": "my-matrix-server-generic-worker-4 | 2023-01-27 09:47:09,818 - synapse.replication.tcp.client - 381 - ERROR - PUT-32992 - Timed out waiting for stream receipts",
"level": "error"
},
{
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"level": "warning"
},
{
"line": "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - [database config 'master']: Checking database server",
"level": "info"
},
{
"line": "my-matrix-server | 2023-01-26 15:08:40,447 - synapse.access.http.8008 - 460 - INFO - PUT-74929 - 0.0.0.0 - 8008 - {@alice:example.com} Processed request: 0.011sec/0.000sec (0.000sec, 0.000sec) (0.001sec/0.008sec/3) 2B 200 \"PUT /_matrix/client/r0/user/%40alice%3Atexample.com/account_data/im.vector.setting.breadcrumbs HTTP/1.0\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Element/1.11.20 Chrome/108.0.5359.179 Electron/22.0.3 Safari/537.36\" [0 dbevts]",
"level": "info"
}
],
"highlights": {
"user_id": {
"pattern": "(@|%40)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"room_id": {
"pattern": "(!|%21)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"room_alias": {
"pattern": "(#|%23)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"event_id_v1_v2": {
"pattern": "(\\$|%25)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"event_id_v3_plus": {
"pattern": "(\\$|%25)([A-Za-z0-9+/_]|-){43}",
"underline": true
}
}
}
}


@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0


@@ -1,20 +1,37 @@
groups:
- name: synapse
rules:
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
###
### Prometheus Console Only
### The following rules are only needed if you use the Prometheus Console
### in contrib/prometheus/consoles/synapse.html
###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus_total + 0'
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
@@ -23,11 +40,11 @@ groups:
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus_total + 0'
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus_total + 0'
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
@@ -41,34 +58,21 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
###
### End of 'Prometheus Console Only' rules block
###
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###
expr: sum without(type) (synapse_storage_events_persisted_events_sep)


@@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
cat << EOF > generic_worker$i.yaml
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
@@ -18,16 +18,14 @@ worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 808$i
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
#worker_pid_file: DATADIR/generic_worker$i.pid
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
Customise the script to your needs.
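For illustration, the file this loop writes for `i=1` would look roughly as follows (only the fields visible in the snippet above are reproduced; anything elided by the diff hunks is omitted):

```yaml
# generic_worker1.yaml - sketch of the loop output for i=1
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
worker_replication_http_port: 9093
worker_listeners:
  - type: http
    port: 8081
    x_forwarded: true
    resources:
      - names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
#worker_pid_file: DATADIR/generic_worker1.pid
```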


@@ -8,9 +8,7 @@ It also prints out the example lines for Synapse main configuration file.
Remember to route the necessary endpoints directly to the worker associated with them.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
@@ -48,11 +46,9 @@ worker_listeners:
- type: http
port: $(expr $HTTP_START_PORT + $i)
x_forwarded: true
resources:
- names: [client]
#worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
@@ -95,9 +91,7 @@ Simply run the script to create YAML files in the current folder and print out t
```console
$ ./create_stream_writers.sh
```
You should receive an output similar to the following:
```console
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.


@@ -31,11 +31,12 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.3.2
pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \
@@ -60,7 +61,7 @@ dh_virtualenv \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
@@ -77,14 +78,9 @@ case "$DEB_BUILD_OPTIONS" in
cp -r tests "$tmpdir"
# To avoid pulling in the unbuilt Synapse in the local directory
pushd /
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
popd
;;
esac

debian/changelog vendored

@@ -1,201 +1,3 @@
matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium
* Add `matrix-org-archive-keyring` package as recommended.
* New Synapse release 1.78.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Feb 2023 14:29:19 +0000
matrix-synapse-py3 (1.77.0) stable; urgency=medium
* New Synapse release 1.77.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Feb 2023 12:59:02 +0100
matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium
* New Synapse release 1.77.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Feb 2023 12:44:21 +0000
matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium
* New Synapse release 1.77.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Feb 2023 13:45:14 +0000
matrix-synapse-py3 (1.76.0) stable; urgency=medium
* New Synapse release 1.76.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 Jan 2023 08:21:47 -0800
matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium
* New Synapse release 1.76.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 Jan 2023 11:17:57 +0000
matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium
* Use Poetry 1.3.2 to manage the bundled virtualenv included with this package.
* New Synapse release 1.76.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 25 Jan 2023 16:21:16 +0000
matrix-synapse-py3 (1.75.0) stable; urgency=medium
* New Synapse release 1.75.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 Jan 2023 11:36:02 +0000
matrix-synapse-py3 (1.75.0~rc2) stable; urgency=medium
* New Synapse release 1.75.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 12 Jan 2023 10:30:15 -0800
matrix-synapse-py3 (1.75.0~rc1) stable; urgency=medium
* New Synapse release 1.75.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 Jan 2023 12:18:27 +0000
matrix-synapse-py3 (1.74.0) stable; urgency=medium
* New Synapse release 1.74.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Dec 2022 16:07:38 +0000
matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
* New dependency on libicu-dev to provide improved results for user
search.
* New Synapse release 1.74.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Dec 2022 13:30:01 +0000
matrix-synapse-py3 (1.73.0) stable; urgency=medium
* New Synapse release 1.73.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Dec 2022 11:48:56 +0000
matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
* New Synapse release 1.73.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 01 Dec 2022 10:02:19 +0000
matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
* New Synapse release 1.73.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Nov 2022 12:28:13 +0000
matrix-synapse-py3 (1.72.0) stable; urgency=medium
* New Synapse release 1.72.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Nov 2022 10:57:30 +0000
matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
* New Synapse release 1.72.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 16 Nov 2022 15:10:59 +0000
matrix-synapse-py3 (1.71.0) stable; urgency=medium
* New Synapse release 1.71.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Nov 2022 10:38:10 +0000
matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
* New Synapse release 1.71.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 04 Nov 2022 12:00:33 +0000
matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
* New Synapse release 1.71.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Nov 2022 12:10:17 +0000
matrix-synapse-py3 (1.70.1) stable; urgency=medium
* New Synapse release 1.70.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 28 Oct 2022 12:10:21 +0100
matrix-synapse-py3 (1.70.0) stable; urgency=medium
* New Synapse release 1.70.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Oct 2022 11:11:50 +0100
matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
* New Synapse release 1.70.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Oct 2022 10:59:47 +0100
matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
* New Synapse release 1.70.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 19 Oct 2022 14:11:57 +0100
matrix-synapse-py3 (1.69.0) stable; urgency=medium
* New Synapse release 1.69.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Oct 2022 11:31:03 +0100
matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
* New Synapse release 1.69.0rc4.
-- Synapse Packaging team <packages@matrix.org> Fri, 14 Oct 2022 15:04:47 +0100
matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
* New Synapse release 1.69.0rc3.
-- Synapse Packaging team <packages@matrix.org> Wed, 12 Oct 2022 13:24:04 +0100
matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
* New Synapse release 1.69.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 06 Oct 2022 14:45:00 +0100
matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
* The man page for the hash_password script has been updated to reflect
the correct default value of 'bcrypt_rounds'.
* New Synapse release 1.69.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Oct 2022 11:17:16 +0100
matrix-synapse-py3 (1.68.0) stable; urgency=medium
* New Synapse release 1.68.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Sep 2022 12:02:09 +0100
matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
* New Synapse release 1.68.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 23 Sep 2022 09:40:10 +0100
matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
* New Synapse release 1.68.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Sep 2022 11:18:20 +0100
matrix-synapse-py3 (1.67.0) stable; urgency=medium
* New Synapse release 1.67.0.
@@ -220,15 +22,11 @@ matrix-synapse-py3 (1.66.0) stable; urgency=medium
matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium
[ Jörg Behrmann ]
* Update debhelper to compatibility level 12.
* Drop the preinst script stopping synapse.
* Allocate a group for the system user.
* Change dpkg-statoverride to --force-statoverride-add.
[ Erik Johnston ]
* Disable `dh_auto_configure` as it broke during Rust build.
-- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100
matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium

debian/control vendored

@@ -8,8 +8,6 @@ Build-Depends:
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
libicu-dev,
pkg-config,
lsb-release,
python3-dev,
python3,
@@ -37,7 +35,6 @@ Depends:
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
${shlibs1:Recommends},
matrix-org-archive-keyring,
Suggests:
sqlite3,
${perl:Depends},


@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as a parameter either on the command line or on \fBSTDIN\fR if not supplied\.
.P
It accepts a YAML file which can be used to specify parameters like the number of rounds for bcrypt and a password_config section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
It accepts a YAML file which can be used to specify parameters like the number of rounds for bcrypt and a password_config section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"


@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.
It accepts a YAML file which can be used to specify parameters like the
number of rounds for bcrypt and a password_config section containing the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **12**.
value used for the hashing. By default `bcrypt_rounds` is set to **10**.
The hashed password is written on the `STDOUT`.
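As a hedged sketch of such a YAML file (option names taken from the standard Synapse configuration; the values are placeholders):

```yaml
# Example config file for hash_password; values are illustrative.
bcrypt_rounds: 12
password_config:
  pepper: "EVEN_MORE_SECRET"
```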

debian/rules vendored

@@ -12,8 +12,6 @@ override_dh_installsystemd:
# we don't really want to strip the symbols from our object files.
override_dh_strip:
override_dh_auto_configure:
# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal


@@ -17,33 +17,37 @@
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.11
ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential cargo git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry==1.3.2"
pip install --user "poetry==1.2.0"
WORKDIR /synapse
@@ -64,51 +68,34 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
touch /synapse/requirements.txt; \
touch /synapse/requirements.txt; \
fi
###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
libicu-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
rustc \
zlib1g-dev \
git \
&& rm -rf /var/lib/apt/lists/*
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
@@ -121,9 +108,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
COPY pyproject.toml README.rst /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -131,19 +117,17 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
fi
###
### Stage 2: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
FROM docker.io/python:${PYTHON_VERSION}-slim
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -151,20 +135,19 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libicu67 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -175,4 +158,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -36,10 +36,8 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
# Debian sid. TODO: Switch back to upstream once https://github.com/spotify/dh-virtualenv/pull/354 has merged.
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/matrix-org/dh-virtualenv/archive/refs/tags/matrixorg-2023010302.tar.gz
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
# install its build deps. We do another apt-cache-update here, because we might
@@ -74,7 +72,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
@@ -86,19 +83,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a


@@ -1,7 +1,6 @@
# syntax=docker/dockerfile:1
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
@@ -24,7 +23,7 @@ FROM debian:bullseye-slim AS deps_base
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the the regular Synapse docker image
FROM $FROM
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
@@ -41,11 +40,7 @@ FROM $FROM
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx
# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log
RUN chown www-data /var/log/nginx /var/lib/nginx
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/


@@ -241,4 +241,4 @@ healthcheck:
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).


@@ -7,31 +7,36 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
# first of all, we create a base image with a postgres server and database,
# which we can copy into the target image. For repeated rebuilds, this is
# much faster than apt installing postgres each time.
#
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
FROM postgres:13-bullseye AS postgres_base
# initialise the database cluster in /var/lib/postgresql
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
# Configure a password and create a database for Synapse
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.


@@ -6,7 +6,7 @@ set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
@@ -45,12 +45,7 @@ esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
# Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
# utilizing WORKERS=1 for backwards compatibility.
# -n True if the length of string is non-zero.
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
export SYNAPSE_WORKER_TYPES="\
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
@@ -62,12 +57,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
federation_reader, \
federation_sender, \
synchrotron, \
client_reader, \
appservice, \
pusher"
fi
log "Workers requested: $SYNAPSE_WORKER_TYPES"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
@@ -76,17 +68,6 @@ else
fi
if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then
if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then
export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1"
else
export SYNAPSE_ASYNC_IO_REACTOR="1"
fi
else
export SYNAPSE_ASYNC_IO_REACTOR="0"
fi
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then


@@ -12,8 +12,6 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
url_preview_enabled: true
url_preview_ip_range_blacklist: []
## Registration ##
@@ -92,18 +90,18 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
{% if not workers_in_use %}
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
msc3381_polls_enabled: true
# Enable deleting device-specific notification settings stored in account data
msc3890_enabled: true
# Enable removing account data support
msc3391_enabled: true
# Filtering /messages by relation type.
msc3874_enabled: true
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server


@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers.
# below. Leave empty for no workers, or set to '*' for all possible workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -39,7 +39,6 @@
# continue to work if so.
import os
import platform
import subprocess
import sys
from pathlib import Path
@@ -50,18 +49,13 @@ from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
# Workers with exposed endpoints need either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
# Watching /_matrix/federation needs a "federation" listener
# Watching /_matrix/media and related needs a "media" listener
# Stream Writers require "client" and "replication" listeners because they
# have to attach by instance_map to the master process and have client endpoints.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.pusher",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {},
"shared_extra_conf": {"start_pushers": False},
"worker_extra_conf": "",
},
"user_dir": {
@@ -84,11 +78,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
# The first configured media worker will run the media background jobs
"shared_extra_conf": {
"enable_media_repo": False,
"media_instance_running_background_jobs": "media_repository1",
},
"shared_extra_conf": {"enable_media_repo": False},
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
@@ -99,10 +89,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"federation_sender": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.federation_sender",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {},
"shared_extra_conf": {"send_federation": False},
"worker_extra_conf": "",
},
"synchrotron": {
@@ -117,35 +107,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"client_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
"^/_matrix/client/v1/rooms/.*/hierarchy$",
"^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
"^/_matrix/client/v1/rooms/.*/threads$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
@@ -164,7 +125,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/federation/(v1|v2)/invite/",
"^/_matrix/federation/(v1|v2)/query_auth/",
"^/_matrix/federation/(v1|v2)/event_auth/",
"^/_matrix/federation/v1/timestamp_to_event/",
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
"^/_matrix/federation/(v1|v2)/user/devices/",
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -211,54 +171,14 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"frontend_proxy": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.frontend_proxy",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"account_data": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": [
"^/_matrix/client/(r0|v3|unstable)/.*/tags",
"^/_matrix/client/(r0|v3|unstable)/.*/account_data",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"presence": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"receipts": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": [
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"to_device": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"typing": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
],
"shared_extra_conf": {},
"worker_extra_conf": "",
"worker_extra_conf": (
"worker_main_http_uri: http://127.0.0.1:%d"
% (MAIN_PROCESS_HTTP_LISTENER_PORT,)
),
},
}
@@ -281,19 +201,24 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
"""Log something to the stdout.
Args:
txt: The text to log.
"""
print(txt)
def error(txt: str) -> NoReturn:
print(txt, file=sys.stderr)
"""Log something and exit with an error code.
Args:
txt: The text to log in error.
"""
log(txt)
sys.exit(2)
def flush_buffers() -> None:
sys.stdout.flush()
sys.stderr.flush()
def convert(src: str, dst: str, **template_vars: object) -> None:
"""Generate a file from a template
@@ -322,14 +247,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
outfile.write(rendered)
def add_worker_roles_to_shared_config(
def add_sharding_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
append appropriate worker information to it for the current worker_type instance.
append sharded worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -360,19 +285,9 @@ def add_worker_roles_to_shared_config(
"port": worker_port,
}
elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
# Update the list of stream writers
# It's convenient that the name of the worker type is the same as the stream to write
shared_config.setdefault("stream_writers", {}).setdefault(
worker_type, []
).append(worker_name)
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
elif worker_type == "media_repository":
# The first configured media worker will run the media background jobs
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
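# Illustrative only (not part of this script): after registering a single
# "account_data" stream writer named "account_data1" on a hypothetical port
# 18015, the shared config fragment built above would look roughly like:
#
#   shared_config = {
#       "stream_writers": {"account_data": ["account_data1"]},
#       "instance_map": {"account_data1": {"host": "localhost", "port": 18015}},
#   }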
def generate_base_homeserver_config() -> None:
@@ -384,7 +299,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
def generate_worker_files(
@@ -458,8 +373,8 @@ def generate_worker_files(
# No workers, just the main process
worker_types = []
else:
# Split type names by comma, ignoring whitespace.
worker_types = [x.strip() for x in worker_types_env.split(",")]
# Split type names by comma
worker_types = worker_types_env.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -478,11 +393,14 @@ def generate_worker_files(
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_type = worker_type.strip()
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
error(worker_type + " is an unknown worker type! Please fix!")
log(worker_type + " is an unknown worker type! It will be ignored")
continue
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
@@ -501,11 +419,11 @@ def generate_worker_files(
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
shared_config, worker_type, worker_name, worker_port
)
if worker_type_total_count > 1:
# Update the shared config with sharding-related options if necessary
add_sharding_to_shared_config(
shared_config, worker_type, worker_name, worker_port
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
@@ -686,24 +604,14 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
with open(mark_filepath, "w") as f:
f.write("")
# Lifted right out of start.py
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log("Could not find %s, will not use" % (jemallocpath,))
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
flush_buffers()
os.execle(
os.execl(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
environ,
)

View File

@@ -1,75 +0,0 @@
# syntax=docker/dockerfile:1
# This dockerfile builds an editable install of Synapse.
#
# Used by `complement.sh`. Not suitable for production use.
ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# Make a base copy of the editable source tree, so that we have something to
# install and build now — even though it's going to be covered up by a mount
# at runtime.
COPY synapse /editable-src/synapse/
COPY rust /editable-src/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
RUN pip install poetry
RUN poetry config virtualenvs.create false
RUN cd /editable-src && poetry install --extras all
# Make copies of useful things for inspection:
# - the Rust module (must be copied to the editable source tree before startup)
# - poetry.lock is useful for checking if dependencies have changed.
RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
RUN cp /editable-src/poetry.lock /poetry.lock.bak
### Extra setup from original Dockerfile
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1

View File

@@ -13,19 +13,14 @@ import jinja2
# Utility functions
def log(txt: str) -> None:
print(txt)
print(txt, file=sys.stderr)
def error(txt: str) -> NoReturn:
print(txt, file=sys.stderr)
log(txt)
sys.exit(2)
def flush_buffers() -> None:
sys.stdout.flush()
sys.stderr.flush()
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -136,10 +131,10 @@ def generate_config_from_template(
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
subprocess.run(["chown", "-R", ownership, "/data"], check=True)
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
subprocess.run(args, check=True)
subprocess.check_output(args)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -163,7 +158,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
subprocess.run(["chown", ownership, data_dir], check=True)
subprocess.check_output(["chown", ownership, data_dir])
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -190,7 +185,6 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
flush_buffers()
os.execv(sys.executable, args)
@@ -273,10 +267,8 @@ running with 'migrate_config'. See the README for more details.
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
flush_buffers()
os.execve("/usr/sbin/gosu", args, environ)
else:
flush_buffers()
os.execve(sys.executable, args, environ)

View File

@@ -9,8 +9,6 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
- [coturn TURN server](setup/turn/coturn.md)
- [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)
# Upgrading
@@ -97,7 +95,6 @@
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)
- [Single Sign-On]()
- [SAML](development/saml.md)

View File

@@ -5,7 +5,7 @@ use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Renew account

View File

@@ -3,7 +3,7 @@
This API returns information about reported events.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The api is:
```

View File

@@ -6,7 +6,7 @@ Details about the format of the `media_id` and storage of the media in the file
are documented under [media repository](../media_repository.md).
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## List all media in a room
@@ -235,14 +235,6 @@ The following fields are returned in the JSON response body:
Request:
```
POST /_synapse/admin/v1/media/delete?before_ts=<before_ts>
{}
```
*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint:
```
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
@@ -251,7 +243,7 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.*
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access, not the timestamp when the file was created.
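As an illustration of the parameters above, a minimal Python sketch (the homeserver URL and admin token are placeholders, and the `requests` library is assumed to be available):
```python
import time

import requests

SERVER = "https://matrix.example.com"   # placeholder homeserver URL
TOKEN = "<admin access token>"          # placeholder admin access token

# Delete media whose last access was more than 30 days ago (milliseconds).
before_ts = int((time.time() - 30 * 24 * 3600) * 1000)

resp = requests.post(
    f"{SERVER}/_synapse/admin/v1/media/delete",
    params={"before_ts": before_ts},
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={},
)
resp.raise_for_status()
print(resp.json())
```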

View File

@@ -11,7 +11,7 @@ Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:

View File

@@ -5,7 +5,7 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.

View File

@@ -6,7 +6,7 @@ local users. The server administrator must be in the room and have permission to
invite users.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Parameters

View File

@@ -5,7 +5,7 @@ server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
**Parameters**
@@ -393,151 +393,6 @@ A response body like the following is returned:
}
```
# Room Messages API
The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```
**Parameters**
The following path parameters are required:
* `room_id` - The ID of the room you wish to fetch messages from.
The following query parameters are available:
* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
this value to `b` will reverse the above sort order. Defaults to `f`.
**Response**
The following fields are possible in the JSON response body:
* `chunk` - A list of room events. The order depends on the dir parameter.
Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.
**Example**
For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
```json
{
"chunk": [
{
"content": {
"body": "This is an example text message",
"format": "org.matrix.custom.html",
"formatted_body": "<b>This is an example text message</b>",
"msgtype": "m.text"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"type": "m.room.message",
"unsigned": {
"age": 1234
}
},
{
"content": {
"name": "The room name"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"state_key": "",
"type": "m.room.name",
"unsigned": {
"age": 1234
}
},
{
"content": {
"body": "Gangnam Style",
"info": {
"duration": 2140786,
"h": 320,
"mimetype": "video/mp4",
"size": 1563685,
"thumbnail_info": {
"h": 300,
"mimetype": "image/jpeg",
"size": 46144,
"w": 300
},
"thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
"w": 480
},
"msgtype": "m.video",
"url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"type": "m.room.message",
"unsigned": {
"age": 1234
}
}
],
"end": "t47409-4357353_219380_26003_2265",
"start": "t47429-4392820_219380_26003_2265"
}
```
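For completeness, a hedged Python sketch of issuing the request described above (the server URL, token, room ID, and `from` pagination token are placeholders):
```python
import requests

SERVER = "https://matrix.example.com"   # placeholder homeserver URL
TOKEN = "<admin access token>"          # placeholder admin access token
ROOM_ID = "!636q39766251:example.com"
FROM = "<token from /sync or a previous call>"  # placeholder pagination token

resp = requests.get(
    f"{SERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/messages",
    params={"from": FROM, "limit": 10, "dir": "f"},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
for event in resp.json()["chunk"]:
    print(event["type"], event["event_id"])
```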
# Room Timestamp to Event API
The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).
Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```
**Parameters**
The following path parameters are required:
* `room_id` - The ID of the room you wish to check.
The following query parameters are available:
* `ts` - a timestamp in milliseconds where we will find the closest event in
the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp. Defaults to `f`.
**Response**
* `event_id` - converted from timestamp
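For example, a minimal Python sketch of finding the first event on or after a given date (the server URL, token, and room ID are placeholders):
```python
import datetime

import requests

SERVER = "https://matrix.example.com"   # placeholder homeserver URL
TOKEN = "<admin access token>"          # placeholder admin access token
ROOM_ID = "!636q39766251:example.com"

# Unix timestamp in milliseconds for 2023-01-01 00:00 UTC.
ts = int(datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc).timestamp() * 1000)

resp = requests.get(
    f"{SERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/timestamp_to_event",
    params={"ts": ts, "dir": "f"},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
print(resp.json()["event_id"])
```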
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

View File

@@ -4,7 +4,7 @@ Returns information about all local media usage of users. Gives the
possibility to filter them by time and user.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:

View File

@@ -1,7 +1,7 @@
# User Admin API
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Query User Account
@@ -37,13 +37,11 @@ It returns a JSON body like the following:
"is_guest": 0,
"admin": 0,
"deactivated": 0,
"erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
"consent_ts": null,
"external_ids": [
{
"auth_provider": "<provider1>",
@@ -168,7 +166,6 @@ A response body like the following is returned:
"admin": 0,
"user_type": null,
"deactivated": 0,
"erased": false,
"shadow_banned": 0,
"displayname": "<User One>",
"avatar_url": null,
@@ -179,7 +176,6 @@ A response body like the following is returned:
"admin": 1,
"user_type": null,
"deactivated": 0,
"erased": false,
"shadow_banned": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>",
@@ -250,7 +246,6 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
- `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -369,7 +364,6 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)
## Reset password
@@ -1159,80 +1153,3 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
### Find a user based on their ID in an auth provider
The API is:
```
GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
```
When a user matched the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
```json
{
"user_id": "@hello:example.org"
}
```
**Parameters**
The following parameters should be set in the URL:
- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.
The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
**Errors**
Returns a `404` HTTP status code if no user was found, with a response body like this:
```json
{
"errcode":"M_NOT_FOUND",
"error":"User not found"
}
```
_Added in Synapse 1.68.0._
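Since the `external_id` may need URL-encoding as noted above, a hedged Python sketch (the provider ID and external ID are invented for illustration):
```python
import urllib.parse

import requests

SERVER = "https://matrix.example.com"   # placeholder homeserver URL
TOKEN = "<admin access token>"          # placeholder admin access token

provider = "oidc-example"               # hypothetical auth provider ID
external_id = "group/user@example.org"  # hypothetical subject with unsafe chars

resp = requests.get(
    f"{SERVER}/_synapse/admin/v1/auth_providers/"
    f"{urllib.parse.quote(provider, safe='')}/users/"
    f"{urllib.parse.quote(external_id, safe='')}",
    headers={"Authorization": f"Bearer {TOKEN}"},
)
if resp.status_code == 404:
    print("User not found")
else:
    resp.raise_for_status()
    print(resp.json()["user_id"])
```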
### Find a user based on their Third Party ID (ThreePID or 3PID)
The API is:
```
GET /_synapse/admin/v1/threepid/$medium/users/$address
```
When a user matched the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
```json
{
"user_id": "@hello:example.org"
}
```
**Parameters**
The following parameters should be set in the URL:
- `medium` - Kind of third-party ID, either `email` or `msisdn`.
- `address` - Value of the third-party ID.
The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
**Errors**
Returns a `404` HTTP status code if no user was found, with a response body like this:
```json
{
"errcode":"M_NOT_FOUND",
"error":"User not found"
}
```
_Added in Synapse 1.72.0._

View File

@@ -15,7 +15,6 @@ app_service_config_files:
The format of the AS configuration file is as follows:
```yaml
id: <your-AS-id>
url: <base url of AS>
as_token: <token AS will add to requests to HS>
hs_token: <token HS will add to requests to AS>

View File

@@ -10,17 +10,26 @@ The necessary tools are:
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
on how to install the above tools and run the linters.
Install them with:
```sh
pip install -e ".[lint,mypy]"
```
The easiest way to run the lints is to invoke the linter script as follows.
```sh
scripts-dev/lint.sh
```
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
development workflow. It is not, however, recommended to run `mypy`
on save as it takes a while and can be very resource intensive.
development workflow. It is not, however, recommended to run `flake8` or `mypy`
on save as they take a while and can be very resource intensive.
## General rules

View File

@@ -73,15 +73,6 @@ It is also possible to do delegation using a SRV DNS record. However, that is ge
not recommended, as it can be difficult to configure the TLS certificates correctly in
this case, and it offers little advantage over `.well-known` delegation.
Please keep in mind that server delegation is a function of server-server communication,
and as such using SRV DNS records will not cover use cases involving client-server comms.
This means setting global client settings (such as a Jitsi endpoint, or disabling
creating new rooms as encrypted by default, etc) will still require that you serve a file
from the `https://<server_name>/.well-known/` endpoints defined in the spec! If you are
considering using SRV DNS delegation to avoid serving files from this endpoint, consider
the impact that you will not be able to change those client-based default values globally,
and will be relegated to the featureset of the configuration of each individual client.
However, if you really need it, you can find some documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
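As a quick way to see what a server currently advertises for server-server delegation, a small Python sketch (the server name is a placeholder):
```python
import json
import urllib.request

server_name = "example.com"  # placeholder

# Server-to-server delegation is advertised at /.well-known/matrix/server.
url = f"https://{server_name}/.well-known/matrix/server"
try:
    with urllib.request.urlopen(url, timeout=10) as resp:
        print(json.load(resp).get("m.server"))
except Exception as exc:
    # No .well-known file: SRV records or the server name itself apply.
    print(f"No .well-known delegation found: {exc}")
```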

View File

@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================
Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.
Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.
Policy
@@ -17,14 +17,6 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).
A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).
The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context
-------
@@ -39,15 +31,3 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.
For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bump their minimum supported Rust versions frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.
On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.

View File

@@ -24,15 +24,10 @@ The code of Synapse is written in Python 3. To do pretty much anything, you'll n
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
# 3. Get the source.
@@ -67,7 +62,7 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Developing Synapse requires Poetry version 1.3.2 or later.
Synapse requires Poetry version 1.2.0 or later.
Next, open a terminal and install dependencies as follows:
@@ -78,19 +73,6 @@ poetry install --extras all
This will install the runtime and developer dependencies for the project.
## Running Synapse via poetry
To start a local instance of Synapse in the locked poetry environment, create a config file:
```sh
cp docs/sample_config.yaml homeserver.yaml
```
Now edit homeserver.yaml, and run Synapse with:
```sh
poetry run python -m synapse.app.homeserver -c homeserver.yaml
```
# 5. Get in touch.
@@ -119,8 +101,8 @@ regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
Synapse's code style is documented [here](../code_style.md). Please follow
it, including the conventions for [configuration
options and documentation](../code_style.md#configuration-code-and-documentation-format).
it, including the conventions for the [sample configuration
file](../code_style.md#configuration-file-format).
We welcome improvements and additions to our documentation itself! When
writing new pages, please
@@ -132,14 +114,9 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
When changes are made to any Rust code then you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
is quicker than `poetry install`, so is recommended when making frequent
changes to the Rust code.
# 8. Test, test, test!
<a name="test-test-test" id="test-test-test"></a>
<a name="test-test-test"></a>
While you're developing and before submitting a patch, you'll
want to test your code.
@@ -182,12 +159,6 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```
You can run unit tests in parallel by specifying `-jX` argument to `trial` where `X` is the number of parallel runners you want. To use 4 cpu cores, you would run them like:
```sh
poetry run trial -j4 tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
@@ -224,7 +195,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```
Note that the database file is cleared at the beginning of each test run. Thus it
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
@@ -339,13 +310,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
- If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
types you wish to test. A simple comma-delimited string containing the worker types
defined from the `WORKERS_CONFIG` template in
[here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -355,7 +319,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
You can then use this incantation to format the tests appropriately:
@@ -396,7 +360,7 @@ To prepare a Pull Request, please:
## Changelog
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by [Towncrier](https://github.com/twisted/towncrier).
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the `changelog.d` directory named
in the format of `PRnumber.type`. The type can be one of the following:
@@ -412,7 +376,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
formatting, and must end with a full stop (.) or an exclamation mark (!) for
formatting, and should end with a full stop (.) or an exclamation mark (!) for
consistency.
Adding credits to the changelog is encouraged, we value your
@@ -438,7 +402,8 @@ chicken-and-egg problem.
There are two options for solving this:
1. Open the PR without a changelog file, see what number you got, and *then*
add the changelog file to your branch, or:
add the changelog file to your branch (see [Updating your pull
request](#updating-your-pull-request)), or:
1. Look at the [list of all
issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the

View File

@@ -195,24 +195,23 @@ There are three separate aspects to this:
## `event_id` global uniqueness
`event_id`'s can be considered globally unique although there has been a lot of
debate on this topic in places like
[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01). There are several places in Synapse
and even in the Matrix APIs like [`GET
In room versions `1` and `2` it's possible to end up with two events with the
same `event_id` (in the same or different rooms). After room version `3`, that
can only happen with a hash collision, which we basically hope will never
happen.
There are several places in Synapse and even Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.
When scoping `event_id` in a database schema, it is often nice to accompany it
with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
REFERENCES rooms(room_id)`) which makes flexible lookups easy. For example it
makes it very easy to find and clean up everything in a room when it needs to be
purged (no need to use sub-`select` query or join from the `events` table).
A note on collisions: In room versions `1` and `2` it's possible to end up with
two events with the same `event_id` (in the same or different rooms). After room
version `3`, that can only happen with a hash collision, which we basically hope
will never happen (SHA256 has a massive key space).
But hash collisions are still possible, and by treating event IDs as room
scoped, we can reduce the possibility of a hash collision. When scoping
`event_id` in the database schema, it should be also accompanied by `room_id`
(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
`(room_id, event_id)`.
There has been a lot of debate on this in places like
https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01).
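To make the scoping concrete, a small illustrative sketch using Python's built-in `sqlite3` (not Synapse's real schema):
```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE rooms (room_id TEXT PRIMARY KEY);
    CREATE TABLE events (
        room_id  TEXT NOT NULL REFERENCES rooms(room_id),
        event_id TEXT NOT NULL,
        PRIMARY KEY (room_id, event_id)
    );
    """
)
conn.execute("INSERT INTO rooms VALUES ('!abc:example.org')")
conn.execute("INSERT INTO events VALUES ('!abc:example.org', '$someevent')")

# Lookups and purges are scoped by the (room_id, event_id) pair.
conn.execute("DELETE FROM events WHERE room_id = '!abc:example.org'")
```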

View File

@@ -2,13 +2,6 @@
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Installing
See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
Developers should use Poetry 1.3.2 or higher. If you encounter problems related
to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
@@ -130,24 +123,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
## ...reset my venv to the locked environment?
```shell
poetry install --all-extras --sync
```
## ...delete everything and start over from scratch?
```shell
# Stop the current virtualenv if active
$ deactivate
# Remove all of the files from the current environment.
# Don't worry, even though it says "all", this will only
# remove the Poetry virtualenvs for the current project.
$ poetry env remove --all
# Reactivate Poetry shell to create the virtualenv again
$ poetry shell
# Install everything again
$ poetry install --extras all
poetry install --extras all --remove-untracked
```
## ...run a command in the `poetry` virtualenv?
@@ -190,6 +166,7 @@ Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
@@ -202,7 +179,7 @@ poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
@@ -246,6 +223,9 @@ poetry export --extras all
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
## ...build a test wheel?
I usually use
@@ -258,26 +238,12 @@ because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.
## ...handle a Dependabot pull request?
Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
creates a pull request a GitHub Action will run to automatically create a changelog
file. Ensure that:
* the lockfile changes look reasonable;
* the upstream changelog file (linked in the description) doesn't include any
breaking changes;
* continuous integration passes (due to permissions, the GitHub Actions run on
the changelog commit will fail, look at the initial commit of the pull request);
In particular, any updates to the type hints (usually packages which start with `types-`)
should be safe to merge if linting passes.
# Troubleshooting
## Check the version of poetry with `poetry --version`.
The minimum version of poetry supported by Synapse is 1.3.2.
The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
@@ -290,16 +256,6 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
## Remove outdated egg-info
Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
install.
This stores some cached information about dependencies and often conflicts with
letting Poetry do the right thing.
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.

View File

@@ -1,375 +0,0 @@
# How do faster joins work?
This is a work-in-progress set of notes with two goals:
- act as a reference, explaining how Synapse implements faster joins; and
- record the rationale behind our choices.
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
## Overview: processing events in a partially-joined room
The response to a partial join consists of
- the requested join event `J`,
- a list of the servers in the room (according to the state before `J`),
- a subset of the state of the room before `J`,
- the full auth chain of that state subset.
Synapse marks the room as partially joined by adding a row to the database table
`partial_state_rooms`. It also marks the join event `J` as "partially stated",
meaning that we have neither received nor computed the full state before/after
`J`. This is done by adding a row to `partial_state_events`.
<details><summary>DB schema</summary>
```
matrix=> \d partial_state_events
Table "matrix.partial_state_events"
Column │ Type │ Collation │ Nullable │ Default
══════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
event_id │ text │ │ not null │
matrix=> \d partial_state_rooms
Table "matrix.partial_state_rooms"
Column │ Type │ Collation │ Nullable │ Default
════════════════════════╪════════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
device_lists_stream_id │ bigint │ │ not null │ 0
join_event_id │ text │ │ │
joined_via │ text │ │ │
matrix=> \d partial_state_rooms_servers
Table "matrix.partial_state_rooms_servers"
Column │ Type │ Collation │ Nullable │ Default
═════════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
server_name │ text │ │ not null │
```
Indices, foreign-keys and check constraints are omitted for brevity.
</details>
While partially joined to a room, Synapse receives events `E` from remote
homeservers as normal, and can create events at the request of its local users.
However, we run into trouble when we enforce the [checks on an event].
> 1. Is a valid event, otherwise it is dropped. For an event to be valid, it
> must contain a room_id, and it must comply with the event format of that
> room version.
> 2. Passes signature checks, otherwise it is dropped.
> 3. Passes hash checks, otherwise it is redacted before being processed further.
> 4. Passes authorization rules based on the event's auth events, otherwise it
> is rejected.
> 5. **Passes authorization rules based on the state before the event, otherwise
> it is rejected.**
> 6. **Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.**
[checks on an event]: https://spec.matrix.org/v1.5/server-server-api/#checks-performed-on-receipt-of-a-pdu
We can enforce checks 1--4 without any problems.
But we cannot enforce checks 5 or 6 with complete certainty, since Synapse does
not know the full state before `E`, nor that of the room.
### Partial state
Instead, we make a best-effort approximation.
While the room is considered partially joined, Synapse tracks the "partial
state" before events.
This works in a similar way as regular state:
- The partial state before `J` is that given to us by the partial join response.
- The partial state before an event `E` is the resolution of the partial states
after each of `E`'s `prev_event`s.
- If `E` is rejected or a message event, the partial state after `E` is the
partial state before `E`.
- Otherwise, the partial state after `E` is the partial state before `E`, plus
`E` itself.
More concisely, partial state propagates just like full state; the only
difference is that we "seed" it with an incomplete initial state.
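The propagation rule above can be sketched in Python; everything here (names, classes) is invented for illustration, and the state-resolution step is a naive stand-in for the real algorithm:
```python
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state key) -> event ID

@dataclass
class Event:
    event_id: str
    type: str
    state_key: Optional[str]        # None for message events
    prev_event_ids: List[str]
    rejected: bool = False

def resolve(states: List[StateMap]) -> StateMap:
    # Naive stand-in for real state resolution: later maps win.
    out: StateMap = {}
    for s in states:
        out.update(s)
    return out

def state_after(event: Event, state_before: StateMap) -> StateMap:
    # Rejected events and message events do not change the (partial) state.
    if event.rejected or event.state_key is None:
        return dict(state_before)
    after = dict(state_before)
    after[(event.type, event.state_key)] = event.event_id
    return after

# Seed with the incomplete state given in the partial join response for J.
partial_state_after_event: Dict[str, StateMap] = {
    "$J": {("m.room.create", ""): "$create"},
}

def process(event: Event) -> None:
    before = resolve([partial_state_after_event[p] for p in event.prev_event_ids])
    partial_state_after_event[event.event_id] = state_after(event, before)

process(Event("$E1", "m.room.member", "@alice:example.org", ["$J"]))
print(partial_state_after_event["$E1"])
```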
Synapse records that we have only calculated partial state for this event with
a row in `partial_state_events`.
While the room remains partially stated, check 5 on incoming events to that
room becomes:
> 5. Passes authorization rules based on **the resolution between the partial
> state before `E` and `E`'s auth events.** If the event fails to pass
> authorization rules, it is rejected.
Additionally, check 6 is deleted: no soft-failures are enforced.
While partially joined, the current partial state of the room is defined as the
resolution across the partial states after all forward extremities in the room.
_Remark._ Events with partial state are _not_ considered
[outliers](../room-dag-concepts.md#outliers).
### Approximation error
Using partial state means the auth checks can fail in a few different ways[^2].
[^2]: Is this exhaustive?
- We may erroneously accept an incoming event in check 5 based on partial state
when it would have been rejected based on full state, or vice versa.
- This means that an event could erroneously be added to the current partial
state of the room when it would not be present in the full state of the room,
or vice versa.
- Additionally, we may have skipped soft-failing an event that would have been
soft-failed based on full state.
(Note that the discrepancies described in the last two bullets are user-visible.)
This means that we have to be very careful when we want to lookup pieces of room
state in a partially-joined room. Our approximation of the state may be
incorrect or missing. But we can make some educated guesses. If
- our partial state is likely to be correct, or
- the consequences of our partial state being incorrect are minor,
then we proceed as normal, and let the resync process fix up any mistakes (see
below).
When is our partial state likely to be correct?
- It's more accurate the closer we are to the partial join event. (So we should
ideally complete the resync as soon as possible.)
- Non-member events: we will have received them as part of the partial join
response, if they were part of the room state at that point. We may
incorrectly accept or reject updates to that state (at first because we lack
remote membership information; later because of compounding errors), so these
can become incorrect over time.
- Local members' memberships: we are the only ones who can create join and
knock events for our users. We can't be completely confident in the
correctness of bans, invites and kicks from other homeservers, but the resync
process should correct any mistakes.
- Remote members' memberships: we did not receive these in the /send_join
response, so we have essentially no idea if these are correct or not.
In short, we deem it acceptable to trust the partial state for non-membership
and local membership events. For remote membership events, we wait for the
resync to complete, at which point we have the full state of the room and can
proceed as normal.
### Fixing the approximation with a resync
The partial-state approximation is only a temporary affair. In the background,
Synapse begins a "resync" process. This is a continuous loop, starting at the
partial join event and proceeding downwards through the event graph. For each
`E` seen in the room since partial join, Synapse will fetch
- the event ids in the state of the room before `E`, via
[`/state_ids`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1state_idsroomid);
- the event ids in the full auth chain of `E`, included in the `/state_ids`
response; and
- any events from the previous two bullets that Synapse hasn't persisted, via
[`/state`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1stateroomid).
This means Synapse has (or can compute) the full state before `E`, which allows
Synapse to properly authorise or reject `E`. At this point, the event
is considered to have "full state" rather than "partial state". We record this
by removing `E` from the `partial_state_events` table.
\[**TODO:** Does Synapse persist a new state group for the full state
before `E`, or do we alter the (partial-)state group in-place? Are state groups
ever marked as partially-stated? \]
This scheme means it is possible for us to have accepted and sent an event to
clients, only to reject it during the resync. From a client's perspective, the
effect is similar to a retroactive
state change due to state resolution---i.e. a "state reset".[^3]
[^3]: Clients should refresh caches to detect such a change. Rumour has it that
sliding sync will fix this.
When all events since the join `J` have been fully-stated, the room resync
process is complete. We record this by removing the room from
`partial_state_rooms`.
## Faster joins on workers
For the time being, the resync process happens on the master worker.
A new replication stream `un_partial_stated_room` is added. Whenever a resync
completes and a partial-state room becomes fully stated, a new message is sent
into that stream containing the room ID.
## Notes on specific cases
> **NB.** The notes below are rough. Some of them are hidden under `<details>`
disclosures because they have yet to be implemented in mainline Synapse.
### Creating events during a partial join
When sending out messages during a partial join, we assume our partial state is
accurate and proceed as normal. For this to have any hope of succeeding at all,
our partial state must contain an entry for each of the (type, state key) pairs
[specified by the auth rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules):
- `m.room.create`
- `m.room.join_rules`
- `m.room.power_levels`
- `m.room.third_party_invite`
- `m.room.member`
The first four of these should be present in the state before `J` that is given
to us in the partial join response; only membership events are omitted. In order
for us to consider the user joined, we must have their membership event. That
means the only possible omission is the target's membership in an invite, kick
or ban.
The worst possibility is that we locally invite someone who is banned according to
the full state, because we lack their ban in our current partial state. The rest
of the federation---at least, those who are fully joined---should correctly
enforce the [membership transition constraints](
https://spec.matrix.org/v1.3/client-server-api/#room-membership
). So any erroneous invite should be ignored by fully-joined
homeservers and resolved by the resync for partially-joined homeservers.
In more generality, there are two problems we're worrying about here:
- We might create an event that is valid under our partial state, only to later
find out that is actually invalid according to the full state.
- Or: we might refuse to create an event that is invalid under our partial
state, even though it would be perfectly valid under the full state.
However, we expect such problems to be unlikely in practice, because
- We trust that the room has sensible power levels, e.g. that bad actors with
high power levels are demoted before their ban.
- We trust that the resident server provides us up-to-date power levels, join
rules, etc.
- State changes in rooms are relatively infrequent, and the resync period is
relatively quick.
#### Sending out the event over federation
**TODO:** needs prose fleshing out.
Normally: send out in a fed txn to all HSes in the room.
We only know that some HSes were in the room at some point. Wat do.
Send it out to the list of servers from the first join.
**TODO** what do we do here if we have full state?
If the prev event was created by us, we can risk sending it to the wrong HS. (Motivation: privacy concern of the content. Not such a big deal for a public room or an encrypted room. But non-encrypted invite-only...)
But don't want to send out sensitive data in other HS's events in this way.
Suppose we discover after resync that we shouldn't have sent out one our events (not a prev_event) to a target HS. Not much we can do.
What about if we didn't send them an event but shouldn't've?
E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left.
Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?)
- Don't do this currently.
- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages
- Gap: rooms which are infrequently used and take a long time to resync.
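A minimal sketch of that fallback, with hypothetical helper callables standing in for the real storage lookups (Synapse's actual destination logic is more involved):

```python
# Illustrative sketch of choosing federation destinations for an outbound
# event during and after a partial join.
from typing import Callable, Iterable, Set


def get_destinations(
    room_id: str,
    is_partial_state: bool,
    get_servers_in_room: Callable[[str], Iterable[str]],
    get_partial_join_servers: Callable[[str], Iterable[str]],
    our_server_name: str,
) -> Set[str]:
    if is_partial_state:
        # We don't yet know the room's full membership, so fall back to the
        # resident-server list returned by the original partial /send_join.
        servers: Iterable[str] = get_partial_join_servers(room_id)
    else:
        servers = get_servers_in_room(room_id)
    # Never send to ourselves.
    return {s for s in servers if s != our_server_name}
```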
### Joining after a partial join
**NB.** Not yet implemented.
<details>
**TODO:** needs prose fleshing out. Liaise with Matthieu. Explain why we do a /send_join
(Rich was surprised we didn't just create it locally. Answer: to try and avoid
a join which then gets rejected after resync.)
We don't know for sure that any join we create would be accepted.
E.g. the joined user might have been banned; the join rules might have changed in a way that we didn't realise... some way in which the partial state was mistaken.
Instead, do another partial make-join/send-join handshake to confirm that the join works.
- Probably going to get a bunch of duplicate state events and auth events... but the point of partial joins is that these should be small. Many are already persisted = good.
- What if the second send_join response includes a different list of resident HSes? Could ignore it.
- Could even have a special flag that says "just make me a join", i.e. don't bother giving me state or servers in the room. We definitely want the auth chain, though.
- SQ: w.r.t. device lists it's a lot safer to ignore it!
- What if the state at the second join is inconsistent with what we have? Ignore it?
</details>
### Leaving (and kicks and bans) after a partial join
**NB.** Not yet implemented.
<details>
When you're fully joined to a room, to have `U` leave that room their homeserver
needs to
- create a new leave event for `U` which will be accepted by other homeservers,
and
- send that event out to the homeservers in the federation.
When is a leave event accepted? See
[v10 auth rules](https://spec.matrix.org/v1.5/rooms/v10/#authorization-rules):
> 4. If type is m.room.member: [...]
>
> 5. If membership is leave:
>
> 1. If the sender matches state_key, allow if and only if that user's current membership state is invite, join, or knock.
> 2. [...]
I think this means that (well-formed!) self-leaves are governed entirely by
4.5.1. It follows that if we correctly calculate a state which says that `U` is
invited, joined or knocked, and include it in the leave's auth events, then our
event is accepted by checks 4 and 5 on incoming events.
> 4. Passes authorization rules based on the event's auth events, otherwise
> it is rejected.
> 5. Passes authorization rules based on the state before the event, otherwise
> it is rejected.
The only way to fail check 6 is if the receiving server's current state of the
room says that `U` is banned, has left, or has no membership event. But this is
fine: the receiving server already thinks that `U` isn't in the room.
> 6. Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.
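Concretely, the self-leave condition of rule 4.5.1 reduces to a one-line membership check; the following is a standalone sketch, not Synapse's auth implementation.

```python
from typing import Optional


def self_leave_allowed(current_membership: Optional[str]) -> bool:
    """Rule 4.5.1: a well-formed self-leave is allowed iff the user's current
    membership is invite, join or knock. A ban, an earlier leave, or no
    membership at all means the leave must be rejected."""
    return current_membership in ("invite", "join", "knock")
```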
For the second point (publishing the leave event), the best thing we can do is
to publish to all HSes we know to be currently in the room. If they miss that
event, they might send us traffic in the room that we don't care about. This is
a problem with leaving after a "full" join; we don't seek to fix this with
partial joins.
(With that said: there's nothing machine-readable in the /send response. I don't
think we can deduce "destination has left the room" from a failure to /send an
event into that room?)
#### Can we still do this during a partial join?
We can create leave events and can choose what gets included in our auth events,
so we can be sure that we pass check 4 on incoming events. For check 5, we might
have an incorrect view of the state before an event.
The only way we might erroneously think a leave is valid is if
- the partial state before the leave has `U` joined, invited or knocked, but
- the full state before the leave has `U` banned, left or not present,
in which case the leave doesn't make anything worse: other HSes already consider
`U` as not being in the room, and will continue to do so after seeing the leave.
The remaining obstacle is then: can we safely broadcast the leave event? We may
miss servers or incorrectly think that a server is in the room. Or the
destination server may be offline and miss the transaction containing our leave
event. This should self-heal when they see an event whose `prev_events` descends
from our leave.
Another option we considered was to use federation `/send_leave` to ask a
fully-joined server to send out the event on our behalf. But that introduces
complexity without much benefit. Besides, as Rich put it,
> sending out leaves is pretty best-effort currently
so this is probably good enough as-is.
#### Cleanup after the last leave
**TODO**: what cleanup is necessary? Is it all just nice-to-have, to avoid
unnecessary work?
</details>

View File

@@ -16,21 +16,14 @@
There are two methods of enabling the metrics endpoint in Synapse.
The first serves the metrics as a part of the usual web server and
can be enabled by adding the `metrics` resource to the existing
listener as such as in this example:
can be enabled by adding the "metrics" resource to the existing
listener as such:
```yaml
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
bind_addresses: ['::1', '127.0.0.1']
resources:
# added "metrics" in this line
- names: [client, federation, metrics]
compress: false
resources:
- names:
- client
- metrics
```
This provides a simple way of adding metrics to your Synapse
@@ -44,24 +37,14 @@
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
Add a new listener to homeserver.yaml as in this example:
Add a new listener to homeserver.yaml:
```yaml
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
compress: false
# beginning of the new metrics listener
- port: 9000
type: metrics
bind_addresses: ['::1', '127.0.0.1']
listeners:
- type: metrics
port: 9000
bind_addresses:
- '0.0.0.0'
```
1. Restart Synapse.
@@ -152,8 +135,6 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
**The old names will be disabled by default in Synapse v1.71.0 and removed
altogether in Synapse v1.73.0.**
| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -165,13 +146,6 @@ altogether in Synapse v1.73.0.**
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
| synapse_util_caches_cache | synapse_util_caches_cache:total |
| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -209,9 +183,6 @@ altogether in Synapse v1.73.0.**
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
| synapse_admin_mau_current | synapse_admin_mau:current |
| synapse_admin_mau_max | synapse_admin_mau:max |
| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
@@ -290,7 +261,7 @@ Standard Metric Names
As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
the units have been changed to seconds, from milliseconds.
the units have been changed to seconds, from miliseconds.
| New name | Old name |
| ---------------------------------------- | --------------------------------- |

View File

@@ -146,9 +146,6 @@ Note that this callback is called when the event has already been processed and
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
For any given event, this callback will be called on every worker process, even if that worker will not end up
acting on that event. This callback will not be called for events that are marked as rejected.
If multiple modules implement this callback, Synapse runs them all in order.
### `check_can_shutdown_room`

View File

@@ -59,8 +59,8 @@ namespace (such as anything under `/_matrix/client` for example). It is strongly
recommended that modules register their web resources under the `/_synapse/client`
namespace.
The provided resource is a Python class that implements Twisted's [IResource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.IResource.html)
interface (such as [Resource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.Resource.html)).
The provided resource is a Python class that implements Twisted's [IResource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.IResource.html)
interface (such as [Resource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.Resource.html)).
Only one resource can be registered for a given path. If several modules attempt to
register a resource for the same path, the module that appears first in Synapse's
@@ -82,4 +82,4 @@ the callback name as the argument name and the function as its value. A
`register_[...]_callbacks` method exists for each category.
Callbacks for each category can be found on their respective page of the
[Synapse documentation website](https://matrix-org.github.io/synapse).
[Synapse documentation website](https://matrix-org.github.io/synapse).

View File

@@ -49,13 +49,6 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
## OIDC Back-Channel Logout
Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session.
This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
## Sample configs
Here are a few configs for providers that should work with Synapse.
@@ -88,43 +81,93 @@ oidc_providers:
display_name_template: "{{ user.name }}"
```
### Apple
### Dex
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
Although it is designed to help building a full-blown provider with an
external database, it can be configured with static passwords in a config file.
You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
to install Dex.
As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
has more information on setting up SiWA.
The synapse config will look like this:
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
user_mapping_provider:
config:
email_template: "{{ user.email }}"
staticClients:
- id: synapse
secret: secret
redirectURIs:
- '[synapse public baseurl]/_synapse/client/oidc/callback'
name: 'Synapse'
```
Run with `dex serve examples/config-dev.yaml`.
Synapse config:
```yaml
oidc_providers:
- idp_id: dex
idp_name: "My Dex server"
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
```
### Keycloak
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_providers:
- idp_id: keycloak
idp_name: "My KeyCloak server"
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Auth0
[Auth0][auth0] is a hosted SaaS IdP solution.
@@ -205,43 +248,284 @@ oidc_providers:
display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
```
### Dex
### LemonLDAP
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
Although it is designed to help building a full-blown provider with an
external database, it can be configured with static passwords in a config file.
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
to install Dex.
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
1. Create an OpenID Connect Relying Parties in LemonLDAP::NG
2. The parameters are:
- Client ID under the basic menu of the new Relying Parties (`Options > Basic >
Client ID`)
- Client secret (`Options > Basic > Client secret`)
- JWT Algorithm: RS256 within the security menu of the new Relying Parties
(`Options > Security > ID Token signature algorithm` and `Options > Security >
Access Token signature algorithm`)
- Scopes: OpenID, Email and Profile
- Allowed redirection addresses for login (`Options > Basic > Allowed
redirection addresses for login` ) :
`[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
staticClients:
- id: synapse
secret: secret
redirectURIs:
- '[synapse public baseurl]/_synapse/client/oidc/callback'
name: 'Synapse'
oidc_providers:
- idp_id: lemonldap
idp_name: lemonldap
discover: true
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
client_id: "your client id" # TO BE FILLED
client_secret: "your client secret" # TO BE FILLED
scopes:
- "openid"
- "profile"
- "email"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}}"
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
display_name_template: "{{ user.preferred_username|capitalize }}"
```
Run with `dex serve examples/config-dev.yaml`.
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
Synapse config:
```yaml
oidc_providers:
- idp_id: dex
idp_name: "My Dex server"
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
- idp_id: github
idp_name: Github
idp_brand: "github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### Google
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
1. Set up a project in the Google API Console (see
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
### Twitch
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: twitch
idp_name: Twitch
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### GitLab
1. Create a [new application](https://gitlab.com/profile/applications).
2. Add the `read_user` and `openid` scopes.
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
idp_brand: "gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
scopes: ["openid", "read_user"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: '{{ user.nickname }}'
display_name_template: '{{ user.name }}'
```
### Facebook
0. You will need a Facebook developer account. You can register for one
[here](https://developers.facebook.com/async/registration/).
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
console, "Create App", and choose "Build Connected Experiences".
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
need to go through the whole form here.
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
URL.
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
and "App Secret" for use below.
Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
discover: false
issuer: "https://www.facebook.com"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "email"]
authorization_endpoint: "https://facebook.com/dialog/oauth"
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
email_template: "{{ '{{ user.email }}' }}"
```
Relevant documents:
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
but it has a `response_types_supported` which excludes "code" (which we rely on, and
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### Gitea
Gitea is, like Github, not an OpenID provider, but just an OAuth2 provider.
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new application.
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitea
idp_name: Gitea
discover: false
issuer: "https://your-gitea.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: [] # Gitea doesn't support Scopes
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
```
### XWiki
Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
Synapse config:
```yaml
oidc_providers:
- idp_id: xwiki
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Apple
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.
As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
has more information on setting up SiWA.
The synapse config will look like this:
```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
user_mapping_provider:
config:
email_template: "{{ user.email }}"
```
### Django OAuth Toolkit
@@ -291,420 +575,3 @@ oidc_providers:
display_name_template: "{{ user.first_name }} {{ user.last_name }}"
email_template: "{{ user.email }}"
```
### Facebook
0. You will need a Facebook developer account. You can register for one
[here](https://developers.facebook.com/async/registration/).
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
console, "Create App", and choose "Build Connected Experiences".
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
need to go through the whole form here.
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
URL.
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
and "App Secret" for use below.
Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
discover: false
issuer: "https://www.facebook.com"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "email"]
authorization_endpoint: "https://facebook.com/dialog/oauth"
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
Relevant documents:
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
but it has a `response_types_supported` which excludes "code" (which we rely on, and
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
Synapse config:
```yaml
oidc_providers:
- idp_id: github
idp_name: Github
idp_brand: "github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### GitLab
1. Create a [new application](https://gitlab.com/profile/applications).
2. Add the `read_user` and `openid` scopes.
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
idp_brand: "gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
scopes: ["openid", "read_user"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: '{{ user.nickname }}'
display_name_template: '{{ user.name }}'
```
### Gitea
Gitea is, like Github, not an OpenID provider, but just an OAuth2 provider.
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new application.
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitea
idp_name: Gitea
discover: false
issuer: "https://your-gitea.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: [] # Gitea doesn't support Scopes
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
```
### Google
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
1. Set up a project in the Google API Console (see
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
### Keycloak
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
Keycloak supports OIDC Back-Channel Logout, which sends logout notification to Synapse, so that Synapse users get logged out when they log out from Keycloak.
This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
Follow the [Getting Started Guide](https://www.keycloak.org/guides) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_providers:
- idp_id: keycloak
idp_name: "My KeyCloak server"
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
backchannel_logout_enabled: true # Optional
```
### LemonLDAP
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
1. Create an OpenID Connect Relying Parties in LemonLDAP::NG
2. The parameters are:
- Client ID under the basic menu of the new Relying Parties (`Options > Basic >
Client ID`)
- Client secret (`Options > Basic > Client secret`)
- JWT Algorithm: RS256 within the security menu of the new Relying Parties
(`Options > Security > ID Token signature algorithm` and `Options > Security >
Access Token signature algorithm`)
- Scopes: OpenID, Email and Profile
- Allowed redirection addresses for login (`Options > Basic > Allowed
redirection addresses for login` ) :
`[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: lemonldap
idp_name: lemonldap
discover: true
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
client_id: "your client id" # TO BE FILLED
client_secret: "your client secret" # TO BE FILLED
scopes:
- "openid"
- "profile"
- "email"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}}"
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
display_name_template: "{{ user.preferred_username|capitalize }}"
```
### Mastodon
[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using CURL.
This example assumes that:
* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
* Synapse will be registered as an app named `my_synapse_app`.
Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
```sh
curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
```
You should receive a response similar to the following. Make sure to save it.
```json
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
```yaml
oidc_providers:
- idp_id: my_mastodon
idp_name: "Mastodon Instance Example"
discover: false
issuer: "https://your.mastodon.instance.url/@admin"
client_id: "someclientid_123"
client_secret: "someclientsecret_123"
authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
token_endpoint: "https://your.mastodon.instance.url/oauth/token"
userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
scopes: ["read"]
user_mapping_provider:
config:
subject_claim: "id"
```
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
### Shibboleth with OIDC Plugin
[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.
1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly.
2. Create a new config on the IdP Side, ensure that the `client_id` and `client_secret`
are randomly generated data.
```json
{
"client_id": "SOME-CLIENT-ID",
"client_secret": "SOME-SUPER-SECRET-SECRET",
"response_types": ["code"],
"grant_types": ["authorization_code"],
"scope": "openid profile email",
"redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"]
}
```
Synapse config:
```yaml
oidc_providers:
# Shibboleth IDP
#
- idp_id: shibboleth
idp_name: "Shibboleth Login"
discover: true
issuer: "https://YOUR-IDP-URL.TLD"
client_id: "YOUR_CLIENT_ID"
client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP"
scopes: ["openid", "profile", "email"]
allow_existing_users: true
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
subject_claim: "sub"
localpart_template: "{{ user.sub.split('@')[0] }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
### Twitch
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: twitch
idp_name: Twitch
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Twitter
*Using Twitter as an identity provider requires using Synapse 1.75.0 or later.*
1. Setup a developer account on [Twitter](https://developer.twitter.com/en/portal/dashboard)
2. Create a project & app.
3. Enable user authentication and under "Type of App" choose "Web App, Automated App or Bot".
4. Under "App info" set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
5. Obtain the OAuth 2.0 credentials under the "Keys and tokens" tab, copy the "OAuth 2.0 Client ID and Client Secret"
Synapse config:
```yaml
oidc_providers:
- idp_id: twitter
idp_name: Twitter
idp_brand: "twitter" # optional: styling hint for clients
discover: false # Twitter is not OpenID compliant.
issuer: "https://twitter.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
pkce_method: "always"
# offline.access providers refresh tokens, tweet.read and users.read needed for userinfo request.
scopes: ["offline.access", "tweet.read", "users.read"]
authorization_endpoint: https://twitter.com/i/oauth2/authorize
token_endpoint: https://api.twitter.com/2/oauth2/token
userinfo_endpoint: https://api.twitter.com/2/users/me?user.fields=profile_image_url
user_mapping_provider:
config:
subject_template: "{{ user.data.id }}"
localpart_template: "{{ user.data.username }}"
display_name_template: "{{ user.data.name }}"
picture_template: "{{ user.data.profile_image_url }}"
```
### XWiki
Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
Synapse config:
```yaml
oidc_providers:
- idp_id: xwiki
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```

View File

@@ -1,7 +1,6 @@
# Using Postgres
The minimum supported version of PostgreSQL is determined by the [Dependency
Deprecation Policy](deprecation_policy.md).
Synapse supports PostgreSQL versions 10 or later.
## Install postgres client libraries
@@ -16,7 +15,7 @@ connect to a postgres database.
- For other pre-built packages, please consult the documentation from
the relevant package.
- If you installed synapse [in a
virtualenv](setup/installation.md#installing-as-a-python-module-from-pypi), you can install
virtualenv](setup/installation.md#installing-from-source), you can install
the library with:
~/synapse/env/bin/pip install "matrix-synapse[postgres]"

View File

@@ -45,10 +45,6 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)
Optionally, you can also set
[`request_id_header`](./usage/configuration/config_documentation.md#listeners)
so that the server extracts and re-uses the same request ID format that the
reverse proxy is using.
## Reverse-proxy configuration examples
@@ -79,9 +75,6 @@ server {
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
# Synapse responses may be chunked, which is an HTTP/1.1 feature.
proxy_http_version 1.1;
}
}
```

View File

@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1

Some files were not shown because too many files have changed in this diff Show More