Compare commits

1 commit

Author: Action Bot
SHA1: fdfcfaa042
Message: Version picker added for v1.61 docs
Date: 2023-12-11 14:52:19 +00:00

635 changed files with 22451 additions and 43435 deletions


@@ -1,91 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub Actions does not currently support nested groups, so
we are creating a stylized header for each package.
This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Passing tests are first */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* and failing tests are last */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}
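
The template above relies on two GitHub Actions log conventions: raw ANSI escape codes for colour, and the ::group::/::endgroup:: workflow commands that render as collapsible sections. A rough Python sketch of those same output conventions (illustrative only, not part of the repository):

GREEN, YELLOW, RED, GREY, RESET = (
    "\033[0;32m", "\033[0;33m", "\033[0;31m", "\033[0;37m", "\033[0m"
)

def emit_package(name, result, cases):
    # Colour the package header by overall result, as the template does.
    colour = {"PASS": GREEN, "SKIP": YELLOW}.get(result, RED)
    print(f"{colour}📦 {name}{RESET}")
    for case_name, case_result, duration in cases:
        icon = {"PASS": "✅", "SKIP": "🚧"}.get(case_result, "❌")
        # Each test case becomes a collapsible group in the Actions log.
        print(f"::group::{icon} {case_name}{GREY} ({duration}){RESET}")
        print("::endgroup::")

emit_package("github.com/example/pkg", "PASS", [("TestFoo", "PASS", "0.01s")])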


@@ -1,132 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wraps `auditwheel repair` to first check whether we're repairing a potentially
# abi3-compatible wheel; if so, rename the wheel before repairing it.
import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version
def check_is_abi3_compatible(wheel_file: str) -> None:
"""Check the contents of the built wheel for any `.so` files that are *not*
abi3 compatible.
"""
with ZipFile(wheel_file, "r") as wheel:
for file in wheel.namelist():
if not file.endswith(".so"):
continue
if not file.endswith(".abi3.so"):
raise Exception(f"Found non-abi3 lib: {file}")
def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
"""Replaces the cpython wheel file with a ABI3 compatible wheel"""
if tag.abi == "abi3":
# Nothing to do.
return wheel_file
check_is_abi3_compatible(wheel_file)
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
dirname,
f"{name}-{version}-{abi3_tag}.whl",
)
os.rename(wheel_file, new_wheel_file)
print("Renamed wheel to", new_wheel_file)
return new_wheel_file
def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
"""Entry point"""
# Parse the wheel file name into its parts. Note that `parse_wheel_filename`
# normalizes the package name (i.e. it converts matrix_synapse ->
# matrix-synapse), which is not what we want.
_, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
name = os.path.basename(wheel_file).split("-")[0]
if len(tags) != 1:
# We expect a wheel file with only a single tag
raise Exception(f"Unexpectedly found multiple tags: {tags}")
tag = next(iter(tags))
if build:
# We don't use build tags in Synapse
raise Exception(f"Unexpected build tag: {build}")
# If the wheel is for cpython then convert it into an abi3 wheel.
if tag.interpreter.startswith("cp"):
wheel_file = cpython(wheel_file, name, version, tag)
# Finally, repair the wheel.
if archs is not None:
# If we are given archs then we are on macos and need to use
# `delocate-listdeps`.
subprocess.run(["delocate-listdeps", wheel_file], check=True)
subprocess.run(
["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
check=True,
)
else:
subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
parser.add_argument(
"--wheel-dir",
"-w",
metavar="WHEEL_DIR",
help="Directory to store delocated wheels",
required=True,
)
parser.add_argument(
"--require-archs",
metavar="archs",
default=None,
)
parser.add_argument(
"wheel_file",
metavar="WHEEL_FILE",
)
args = parser.parse_args()
wheel_file = args.wheel_file
wheel_dir = args.wheel_dir
archs = args.require_archs
main(wheel_file, wheel_dir, archs)
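
The renaming above hinges on two packaging APIs: parse_wheel_filename, which normalises the distribution name (which is why the script re-derives the name from the raw filename), and Tag, whose str() form is the interpreter-abi-platform triple used in wheel filenames. A small illustration (the filename is made up):

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

name, version, build, tags = parse_wheel_filename(
    "matrix_synapse-1.66.0-cp37-cp37m-manylinux_2_17_x86_64.whl"
)
print(name)           # "matrix-synapse": normalised, unlike the on-disk name
tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
print(str(abi3_tag))  # "cp37-abi3-manylinux_2_17_x86_64"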


@@ -1,135 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calculate the trial jobs to run based on whether we're in a PR or not.
import json
import os
def set_output(key: str, value: str):
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
with open(os.environ["GITHUB_OUTPUT"], "at") as f:
print(f"{key}={value}", file=f)
IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs
trial_sqlite_tests = [
{
"python-version": "3.7",
"database": "sqlite",
"extras": "all",
}
]
if not IS_PR:
trial_sqlite_tests.extend(
{
"python-version": version,
"database": "sqlite",
"extras": "all",
}
for version in ("3.8", "3.9", "3.10")
)
trial_postgres_tests = [
{
"python-version": "3.7",
"database": "postgres",
"postgres-version": "10",
"extras": "all",
}
]
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.10",
"database": "postgres",
"postgres-version": "14",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.7",
"database": "sqlite",
"extras": "",
}
]
print("::group::Calculated trial jobs")
print(
json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
)
)
print("::endgroup::")
test_matrix = json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs
sytest_tests = [
{
"sytest-tag": "focal",
},
{
"sytest-tag": "focal",
"postgres": "postgres",
},
{
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
},
]
if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "testing",
"postgres": "postgres",
},
{
"sytest-tag": "buster",
"postgres": "multi-postgres",
"workers": "workers",
},
]
)
print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")
test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)


@@ -1,21 +0,0 @@
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.
set -o pipefail
set -e
# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages
# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"
# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt

.ci/scripts/postgres_exec.py (new executable file, 31 lines)

@@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
cur.execute(c)
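
Each positional argument is executed as a separate SQL statement, so the CI scripts elsewhere in this diff can replace psql invocations one-for-one. For example (mirroring the portdb script later in this diff):

import subprocess

subprocess.run(
    [
        ".ci/scripts/postgres_exec.py",
        "DROP DATABASE synapse",
        "CREATE DATABASE synapse",
    ],
    check=True,
)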


@@ -1,36 +0,0 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock
block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock


@@ -32,7 +32,7 @@ else
fi
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"


@@ -5,8 +5,18 @@
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
@@ -17,15 +27,19 @@ export VIRTUALENV_NO_DOWNLOAD=1
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# - Make the pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
@@ -39,7 +53,7 @@ sed -i \
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
@@ -53,8 +67,8 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.2.0
poetry lock
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
@@ -62,3 +76,6 @@ echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
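
For reference, the bound-pinning performed by the sed expressions above can be written out in Python; this sketch (not part of the repo) applies the same rewrites to a single pyproject.toml line:

import re

def pin_bounds(line: str) -> str:
    # s/[~>]=/==/g : turn tilde and lower bounds into exact pins.
    line = re.sub(r"[~>]=", "==", line)
    # /^python = "^/!s/\^/==/g : pin caret bounds everywhere except the
    # supported-Python line, which must keep its caret (see the TODO above).
    if not line.startswith('python = "^'):
        line = line.replace("^", "==")
    return line

assert pin_bounds('attrs = ">=19.2.0"') == 'attrs = "==19.2.0"'
assert pin_bounds('jsonschema = "^3.0"') == 'jsonschema = "==3.0"'
assert pin_bounds('python = "^3.7"') == 'python = "^3.7"'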


@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe -o pipefail
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,23 +45,9 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff
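
The schema comparison above boils down to a unified diff of two pg_dump outputs; an equivalent check in Python with difflib (illustrative only):

import difflib

with open("unported.sql") as f:
    unported = f.readlines()
with open("ported.sql") as f:
    ported = f.readlines()

# Empty output means the ported schema matches a freshly created one.
delta = "".join(difflib.unified_diff(unported, ported, "unported.sql", "ported.sql"))
print(delta or "schemas match")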


@@ -4,15 +4,8 @@
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py
rust/target
synapse/*.so
**/__pycache__


@@ -7,4 +7,3 @@ root = true
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88


@@ -1,16 +1,3 @@
# Commits in this file will be removed from GitHub blame results.
#
# To use this file locally, use:
# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" <files>
#
# or configure the `blame.ignoreRevsFile` option in your git config.
#
# If ignoring a pull request that was not squash merged, only the merge
# commit needs to be put here. Child commits will be resolved from it.
# Run black (#3679).
8b3d9b6b199abb87246f982d5db356f1966db925
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3

.github/ISSUE_TEMPLATE/BUG_REPORT.md (vendored, new file, 72 lines)

@@ -0,0 +1,72 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
<!-- Describe here the problem that you are experiencing -->
### Steps to reproduce
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:
If not matrix.org:
<!--
What version of Synapse is running?
You can find the Synapse version with this command:
$ curl http://localhost:8008/_synapse/admin/v1/server_version
(You may need to replace `localhost:8008` if Synapse is not configured to
listen on that port.)
-->
- **Version**:
- **Install method**:
<!-- examples: package manager/git clone/pip -->
- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->


@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
- type: markdown
attributes:
value: |
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
You can also preview your report before submitting it.
- type: textarea
id: description
attributes:
label: Description
description: Describe the problem that you are experiencing
validations:
required: true
- type: textarea
id: reproduction_steps
attributes:
label: Steps to reproduce
description: |
Describe the series of steps that leads you to the problem.
Describe how what happens differs from what you expected.
placeholder: Tell us what you see!
value: |
- list the steps
- that reproduce the bug
- using hyphens as bullet points
validations:
required: true
- type: markdown
attributes:
value: |
---
**IMPORTANT**: please answer the following questions, to help us narrow down the problem.
- type: input
id: homeserver
attributes:
label: Homeserver
description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
validations:
required: true
- type: input
id: version
attributes:
label: Synapse Version
description: |
What version of Synapse is this homeserver running?
You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version
or with this command:
```
$ curl http://localhost:8008/_synapse/admin/v1/server_version
```
(You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
validations:
required: true
- type: dropdown
id: install_method
attributes:
label: Installation Method
options:
- Docker (matrixdotorg/synapse)
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- type: textarea
id: platform
attributes:
label: Platform
description: |
Tell us about the environment in which your homeserver is operating...
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks.
Please be careful to remove any personal or private data.
**Bug reports are usually very difficult to diagnose without logging.**
render: shell
validations:
required: true
- type: textarea
id: anything_else
attributes:
label: Anything else that would be useful to know?


@@ -1,22 +0,0 @@
version: 2
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"


@@ -1,46 +0,0 @@
name: Write changelog for dependabot PR
on:
pull_request:
types:
- opened
- reopened # For debugging!
permissions:
# Needed to be able to push the commit. See
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
# for a similar example
contents: write
jobs:
add-changelog:
runs-on: 'ubuntu-latest'
if: ${{ github.actor == 'dependabot[bot]' }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
- name: Write, commit and push changelog
run: |
echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
git add changelog.d
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "GitHub Actions"
git commit -m "Changelog"
git push
shell: bash
# The `git push` above does not trigger CI on the dependabot PR.
#
# By default, workflows can't trigger other workflows when they're just using the
# default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
# recursive workflow loops by accident, because that'll get very expensive very
# quickly.) Instead, you have to manually call out to another workflow, or else
# make your changes (i.e. the `git push` above) using a personal access token.
# See
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
#
# I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
# See git commit history for previous attempts. If anyone desperately wants to try
# again in the future, make a matrix-bot account and use its access token to git push.
# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.


@@ -17,19 +17,19 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v1
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,15 +48,10 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v3
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
# arm64 builds OOM without the git fetch setting. c.f.
# https://github.com/rust-lang/cargo/issues/10583
build-args: |
CARGO_NET_GIT_FETCH_WITH_CLI=true


@@ -17,7 +17,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
@@ -54,7 +54,7 @@ jobs:
esac
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
echo "::set-output name=branch-version::$branch"
# Deploy to the target directory.
- name: Deploy to gh pages


@@ -5,7 +5,7 @@
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
@@ -25,20 +25,13 @@ jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -59,15 +52,7 @@ jobs:
postgres-version: "14"
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
@@ -76,7 +61,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v4
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -84,12 +69,6 @@ jobs:
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -133,15 +112,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
@@ -155,7 +126,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -164,56 +135,25 @@ jobs:
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
# TODO: run complement (as with twisted trunk, see #12473).
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
# Open an issue if the build fails, so we know about it.
# Only do this if we're not experimenting with this action in a PR.
# open an issue if the build fails, so we know about it.
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the others.
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md


@@ -11,12 +11,11 @@ on:
# we do the full build on tags.
tags: ["v*"]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
@@ -25,10 +24,8 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -36,7 +33,7 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "distros=$dists" >> "$GITHUB_OUTPUT"
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
@@ -52,18 +49,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v1
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -71,9 +68,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: '3.x'
uses: actions/setup-python@v2
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
@@ -89,96 +84,14 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: debs
path: debs/*
build-wheels:
name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-10.15]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-10.15"
# Don't build aarch64 wheels on mac.
- os: "macos-10.15"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
arch: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Build aarch64 wheels
if: matrix.arch == 'aarch64'
run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
name: Build sdist
runs-on: ubuntu-latest
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- run: pip install build
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -186,12 +99,11 @@ jobs:
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v2
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release


@@ -4,48 +4,20 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
steps:
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
with:
filters: |
rust:
- 'rust/**'
- 'Cargo.toml'
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
@@ -55,129 +27,79 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy
lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- lint-clippy
- lint-rustfmt
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig]
runs-on: ubuntu-latest
steps:
- run: "true"
calculate-test-jobs:
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: calculate-test-jobs
runs-on: ubuntu-latest
strategy:
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
python-version: ["3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
# Oldest Python with PostgreSQL
- python-version: "3.7"
database: "postgres"
postgres-version: "10"
extras: "all"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
postgres:${{ matrix.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
extras: ${{ matrix.job.extras }}
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
@@ -198,54 +120,16 @@ jobs:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
extras: "all test"
- run: poetry run trial -j2 tests
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -271,7 +155,7 @@ jobs:
extras: ["all"]
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
@@ -294,37 +178,50 @@ jobs:
sytest:
if: ${{ !failure() && !cancelled() }}
needs: calculate-test-jobs
needs: linting-done
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
POSTGRES: ${{ matrix.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: focal
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: postgres
workers: workers
redis: redis
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
@@ -332,10 +229,10 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
@@ -362,23 +259,20 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
strategy:
matrix:
include:
@@ -403,99 +297,103 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
# We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
# Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
# GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
complement-developonly:
if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
needs: linting-done
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: Install Rust
uses: actions-rs/toolchain@v1
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
path: synapse
- run: cargo test
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- check-sampleconfig
- lint
- lint-crlf
- lint-newsfile
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
- cargo-test
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -503,7 +401,5 @@ jobs:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non-PR builds
# Cargo test is skipped if there are no changes to Rust code
skippable: |
skippable:
lint-newsfile
cargo-test


@@ -1,28 +0,0 @@
name: Move new issues into the issue triage board
on:
issues:
types: [ opened ]
jobs:
add_new_issues:
name: Add new issues to the triage board
runs-on: ubuntu-latest
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation add_to_project($projectid:ID!,$contentid:ID!) {
addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}


@@ -1,44 +0,0 @@
name: Move labelled issues to correct projects
on:
issues:
types: [ labeled ]
jobs:
move_needs_info:
name: Move X-Needs-Info on the triage board
runs-on: ubuntu-latest
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation($projectid: ID!, $contentid: ID!, $fieldid: ID!) {
updateProjectV2ItemFieldValue(
input: {
projectId: $projectid
itemId: $contentid
fieldId: $fieldid
value: {
singleSelectOptionId: "Todo"
}
}
) {
projectV2Item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
fieldid: ${{ env.FIELD_ID }}
optionid: ${{ env.OPTION_ID }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
OPTION_ID: "ba22e43c"
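
Roughly speaking, octokit/graphql-action forwards the query and variables to GitHub's GraphQL endpoint. A hedged Python sketch of the equivalent raw call (not part of the repo; the item ID is a placeholder, and the project and field IDs are the ones from the workflow env above):

import os

import requests

MUTATION = """
mutation($projectid: ID!, $contentid: ID!, $fieldid: ID!) {
  updateProjectV2ItemFieldValue(
    input: {projectId: $projectid, itemId: $contentid, fieldId: $fieldid,
            value: {singleSelectOptionId: "Todo"}}
  ) { projectV2Item { id } }
}
"""

resp = requests.post(
    "https://api.github.com/graphql",
    json={
        "query": MUTATION,
        "variables": {
            "projectid": "PVT_kwDOAIB0Bs4AFDdZ",
            "contentid": "<issue node id>",
            "fieldid": "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4",
        },
    },
    headers={
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        "GraphQL-Features": "projects_next_graphql",
    },
)
resp.raise_for_status()
print(resp.json())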


@@ -15,15 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -40,16 +32,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -81,15 +65,7 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
@@ -112,7 +88,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -120,51 +96,6 @@ jobs:
/logs/results.tap
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.1.14
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
@@ -172,12 +103,11 @@ jobs:
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (vendored, 10 lines changed)

@@ -15,9 +15,8 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry and cargo lockfile.
# We do want the poetry lockfile.
!poetry.lock
!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -61,10 +60,3 @@ book/
# complement
/complement-*
/master.tar.gz
# rust
/target/
/synapse/*.so
# Poetry will create a setup.py, which we don't want to include.
/setup.py


@@ -1 +0,0 @@
group_imports = "StdExternalCrate"

CHANGES.md

File diff suppressed because it is too large.

Cargo.lock (generated)

@@ -1,466 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
dependencies = [
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "proc-macro2"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-log"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
"arc-swap",
"log",
"pyo3",
]
[[package]]
name = "pyo3-macros"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
"pyo3",
"serde",
]
[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
"hex",
"lazy_static",
"log",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
]
[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"


@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).
[workspace]
members = ["rust"]


@@ -2,70 +2,152 @@
Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================
Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
maintained by the Matrix.org Foundation. We began rapid development in 2014,
reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
in earnest today.
Briefly, Matrix is an open standard for communications on the internet, supporting
federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
<https://spec.matrix.org/>`_ describes the technical details.
.. contents::
Installing and configuration
============================
Introduction
============
The Synapse documentation describes `how to install Synapse <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
<https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.
Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:
- Everything in Matrix happens in a room. Rooms are distributed and do not
exist on any single server. Rooms can be located using convenience aliases
like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
you will normally refer to yourself and others using a third party identifier
(3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
The overall architecture is::
client <----> homeserver <=====================> homeserver <----> client
https://somewhere.org/_matrix https://elsewhere.net/_matrix
``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.libera.chat/matrix.
Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!
About Matrix
============
Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
which handle:
- Creating and managing fully distributed chat rooms with no
single points of control or failure
- Eventually-consistent cryptographically secure synchronisation of room
state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls
These APIs are intended to be implemented on a wide range of servers, services
and clients, letting developers build messaging and VoIP functionality on top
of the entirely open Matrix ecosystem rather than using closed or proprietary
solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.
We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
Thanks for using Matrix!
Support
=======
For support installing or managing Synapse, please join |room|_ (from a matrix.org
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.
Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
with its source available in |docs|_.
.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org
.. |docs| replace:: ``docs``
.. _docs: docs
Synapse Installation
====================
.. _federation:
Synapse has a variety of `config options
<https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
which can be used to customise its behaviour after installation.
There are additional details on how to `configure Synapse for federation here
<https://matrix-org.github.io/synapse/latest/federate.html>`_.
.. _reverse-proxy:
Using a reverse proxy with Synapse
----------------------------------
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
For information on configuring one, see `the reverse proxy docs
<https://matrix-org.github.io/synapse/latest/reverse_proxy.html>`_.
Upgrading an existing Synapse
-----------------------------
The instructions for upgrading Synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of Synapse.
.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
* For details on how to install synapse, see
`Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
Platform dependencies
---------------------
Connecting to Synapse from a client
===================================
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`deprecation policy <https://matrix-org.github.io/synapse/latest/deprecation_policy.html>`_
for more details.
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
If all goes well you should at least be able to log in, create a room, and
start sending messages.
.. _`client-user-reg`:
Registering a new user from a client
------------------------------------
By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::
@localpart:my.domain.name
(pronounced "at localpart on my dot domain dot name").
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
Security note
-------------
=============
Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.
@@ -105,76 +187,30 @@ Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
Testing a new installation
==========================
Upgrading an existing Synapse
=============================
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
The instructions for upgrading synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
.. _reverse-proxy:
If all goes well you should at least be able to log in, create a room, and
start sending messages.
Using a reverse proxy with Synapse
==================================
.. _`client-user-reg`:
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
Registering a new user from a client
------------------------------------
By default, registration of new users via Matrix clients is disabled. To enable
it:
1. In the
`registration config section <https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_
set ``enable_registration: true`` in ``homeserver.yaml``.
2. Then **either**:
a. set up a `CAPTCHA <https://matrix-org.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.
We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
the public internet. Without it, anyone can freely register accounts on your homeserver.
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::
@localpart:my.domain.name
(pronounced "at localpart on my dot domain dot name").
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
Troubleshooting and support
===========================
The `Admin FAQ <https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html>`_
includes tips on dealing with some common problems. For more details, see
`Synapse's wider documentation <https://matrix-org.github.io/synapse/latest/>`_.
For additional support installing or managing Synapse, please ask in the community
support room |room|_ (from a matrix.org account if necessary). We do not use GitHub
issues for support requests, only for bug reports and feature requests.
.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org
.. |docs| replace:: ``docs``
.. _docs: docs
For information on configuring one, see `<docs/reverse_proxy.md>`_.
Identity Servers
================
@@ -206,15 +242,34 @@ an email address with your account, or send an invite to another user via their
email address.
Development
===========
Password reset
==============
Users can reset their password through their client. Alternatively, a server admin
can reset a users password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
or by directly editing the database as shown below.
First calculate the hash of the new password::
$ ~/synapse/env/bin/hash_password
Password:
Confirm password:
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Then update the ``users`` table in the database::
UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
WHERE name='@test:test.com';
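For illustration, the following is a rough Python sketch of the kind of hash
the ``hash_password`` script produces, assuming the ``bcrypt`` library and no
password pepper configured in ``homeserver.yaml`` (both assumptions; the
script itself is the supported tool):

.. code-block:: python

    # Sketch only: mimics hash_password's output format under the
    # assumptions above (bcrypt, default work factor of 12, no pepper).
    import bcrypt

    def make_password_hash(password: str, rounds: int = 12) -> str:
        # The resulting string is what would be stored in the
        # users.password_hash column.
        hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(rounds))
        return hashed.decode("ascii")

    print(make_password_hash("my-new-password"))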
Synapse Development
===================
We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for synapse developers as well as synapse administrators.
information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:
* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
@@ -225,6 +280,187 @@ Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
Quick start
-----------
Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.
To check out a copy of Synapse for development, clone the git repo into a working
directory of your choice::
git clone https://github.com/matrix-org/synapse.git
cd synapse
Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
pip install --user pipx
pipx install poetry
as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::
poetry install --extras "all test"
This will download and install all the needed dependencies into a virtual env.
We recommend using the demo, which starts 3 federated instances running on ports `8080` - `8082`::
poetry run ./demo/start.sh
(to stop, you can use ``poetry run ./demo/stop.sh``)
See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
poetry run synapse_homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
poetry run synapse_homeserver --config-path homeserver.yaml
Running the unit tests
----------------------
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
poetry run trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::
Ran 1337 tests in 716.064s
PASSED (skips=15, successes=1322)
For more tips on running the unit tests, like running a specific test or
to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.
Running the Integration Tests
-----------------------------
Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.
Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
Platform dependencies
=====================
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`<docs/deprecation_policy.md>`_ document for more details.
Troubleshooting
===============
Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_
Running out of File Handles
---------------------------
If synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of March 2019 this hasn't happened yet).
If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``
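A quick way to check the limit that actually applies is the standard-library
``resource`` module (a sketch; run it as the same user that runs Synapse):

.. code-block:: python

    # Print the current file-descriptor limits for this process.
    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print(f"soft limit: {soft}, hard limit: {hard}")
    if soft < 4096:
        print("consider raising the limit, e.g. in /etc/security/limits.conf")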
Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. However, if you see this failure mode, please let us
know at #synapse:matrix.org so we can help debug it.
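As a rough illustration of that matching, the sketch below pairs 'Received
request' and 'Processed request' log lines by request ID and reports slow
requests; the regex and timestamp format are assumptions about the default
log layout and may need adjusting to your logging config:

.. code-block:: python

    # Sketch: read a Synapse log on stdin, pair Received/Processed request
    # lines by request ID, and report requests that took over five seconds.
    import re
    import sys
    from datetime import datetime

    LINE = re.compile(
        r"^(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+).*"
        r" - (?P<req>[A-Z]+-\d+) - (?P<kind>Received|Processed) request"
    )
    received = {}
    for line in sys.stdin:
        m = LINE.search(line)
        if not m:
            continue
        ts = datetime.strptime(m["ts"], "%Y-%m-%d %H:%M:%S,%f")
        if m["kind"] == "Received":
            received[m["req"]] = ts
        elif m["req"] in received:
            elapsed = (ts - received.pop(m["req"])).total_seconds()
            if elapsed > 5.0:
                print(f"{m['req']} took {elapsed:.1f}s")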
Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------
First, ensure you are running the latest version of Synapse, using Python 3
with a PostgreSQL database.
Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
degrade.
However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the ``libjemalloc1`` package and adding this
line to ``/etc/default/matrix-synapse``::
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.
If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see `discussion
<https://github.com/matrix-org/synapse/issues/3971>`_). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by setting
the following in the Synapse config file:
.. code-block:: yaml
presence:
enabled: false
People can't accept room invitations from me
--------------------------------------------
The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs::
2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>
This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.
.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org


@@ -1,23 +0,0 @@
# A build script for poetry that adds the rust extension.
import os
from typing import Any, Dict
from setuptools_rust import Binding, RustExtension
def build(setup_kwargs: Dict[str, Any]) -> None:
original_project_dir = os.path.dirname(os.path.realpath(__file__))
cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
extension = RustExtension(
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False


@@ -1,111 +0,0 @@
# Setting up Synapse with Workers using Docker Compose
This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
Example worker configuration files can be found [here](workers).
All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
An example Docker Compose file can be found [here](docker-compose.yaml).
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or, alternatively, pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
### Generic Worker Example
```yaml
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
```
### Federation Sender Example
Please note: the federation sender does not receive REST API calls, so no exposed ports are required.
```yaml
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse
```
## `homeserver.yaml` Configuration
### Enable Redis
Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
```yaml
redis:
enabled: true
host: redis
port: 6379
# password: <secret_password>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
### Add a Replication Listener
Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
```yaml
listeners:
# Other listeners
- port: 9093
type: http
resources:
- names: [replication]
```
This listener is used by the workers for replication and is referred to in worker config files using the following settings:
```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false
federation_sender_instances:
- synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```
## Other Worker types
Using the concepts shown here, it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.


@@ -1,77 +0,0 @@
networks:
backend:
services:
postgres:
image: postgres:latest
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
networks:
- backend
environment:
POSTGRES_DB: synapse
POSTGRES_USER: synapse_user
POSTGRES_PASSWORD: postgres
POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
redis:
image: redis:latest
restart: unless-stopped
networks:
- backend
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/data:/data:rw
ports:
- 8008:8008
networks:
- backend
environment:
SYNAPSE_CONFIG_DIR: /data
SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
depends_on:
- postgres
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse


@@ -1,8 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_log_config: /data/federation_sender.log.config


@@ -1,15 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8081
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /data/worker.log.config

File diff suppressed because it is too large.


@@ -1,3 +1,11 @@
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,25 +20,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cgi
import datetime
import json
import urllib.request
from typing import List
import pydot
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
def make_name(pdu_id: str, origin: str) -> str:
return f"{pdu_id}@{origin}"
def make_graph(pdus: List[dict], filename_prefix: str) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by querying a homeserver.
"""
def make_graph(pdus, room, filename_prefix):
pdu_map = {}
node_map = {}
@@ -116,10 +111,10 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
graph.write_svg("%s.svg" % filename_prefix, prog="dot")
def get_pdus(host: str, room: str) -> List[dict]:
def get_pdus(host, room):
transaction = json.loads(
urllib.request.urlopen(
f"http://{host}/_matrix/federation/v1/context/{room}/"
urllib2.urlopen(
"http://%s/_matrix/federation/v1/context/%s/" % (host, room)
).read()
)
@@ -146,4 +141,4 @@ if __name__ == "__main__":
pdus = get_pdus(host, room)
make_graph(pdus, prefix)
make_graph(pdus, room, prefix)


@@ -14,31 +14,22 @@
import argparse
import cgi
import datetime
import html
import json
import sqlite3
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading from a Synapse SQLite database.
"""
def make_graph(db_name, room_id, file_prefix, limit):
conn = sqlite3.connect(db_name)
sql = "SELECT room_version FROM rooms WHERE room_id = ?"
c = conn.execute(sql, (room_id,))
room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
sql = (
"SELECT json, internal_metadata FROM event_json as j "
"SELECT json FROM event_json as j "
"INNER JOIN events as e ON e.event_id = j.event_id "
"WHERE j.room_id = ?"
)
@@ -52,10 +43,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
c = conn.execute(sql, args)
events = [
make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
for e in c.fetchall()
]
events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
events.sort(key=lambda e: e.depth)
@@ -96,7 +84,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": html.escape(content, quote=True),
"content": cgi.escape(content, quote=True),
"time": t,
"depth": event.depth,
"state_group": state_group,
@@ -108,11 +96,11 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
graph.add_node(node)
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -124,7 +112,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
for event_id in event_ids:
cluster.add_node(node_map[event_id])
@@ -138,7 +126,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given Synapse SQLite file to get the list of PDUs. \n"
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(


@@ -1,3 +1,13 @@
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,35 +22,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading line-delimited JSON from a file.
"""
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
# Figure out the room version, assume the first line is the create event.
room_version = KNOWN_ROOM_VERSIONS[
json.loads(lines[0]).get("content", {}).get("room_version")
]
events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
events = [FrozenEvent(json.loads(line)) for line in lines]
print("Loaded events.")
@@ -76,8 +66,8 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
content.append(
"<b>%s</b>: %s,"
% (
html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
)
)
@@ -111,11 +101,11 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
print("Created Nodes")
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -149,7 +139,8 @@ if __name__ == "__main__":
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
parser.add_argument("room")
args = parser.parse_args()
make_graph(args.event_file, args.prefix, args.limit)
make_graph(args.event_file, args.room, args.prefix, args.limit)


@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0


@@ -1,20 +1,37 @@
groups:
- name: synapse
rules:
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
###
### Prometheus Console Only
### The following rules are only needed if you use the Prometheus Console
### in contrib/prometheus/consoles/synapse.html
###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus_total + 0'
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
@@ -23,11 +40,11 @@ groups:
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus_total + 0'
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus_total + 0'
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
@@ -41,34 +58,21 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
###
### End of 'Prometheus Console Only' rules block
###
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###
expr: sum without(type) (synapse_storage_events_persisted_events_sep)


@@ -1,33 +0,0 @@
# Creating multiple generic workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:
```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 808$i
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs.
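
The same generation can be done from Python. Here is a hypothetical equivalent of the loop above, assuming PyYAML is installed; the field values mirror the bash heredoc:

```python
# Write five generic worker configs, mirroring the bash script above.
# Assumes PyYAML (`pip install pyyaml`).
import yaml

for i in range(1, 6):
    config = {
        "worker_app": "synapse.app.generic_worker",
        "worker_name": f"generic_worker{i}",
        # The replication listener on the main synapse process.
        "worker_replication_host": "127.0.0.1",
        "worker_replication_http_port": 9093,
        "worker_main_http_uri": "http://localhost:8008/",
        "worker_listeners": [
            {
                "type": "http",
                "port": 8080 + i,
                "resources": [{"names": ["client", "federation"]}],
            }
        ],
        "worker_log_config": "/etc/matrix-synapse/generic-worker-log.yaml",
    }
    with open(f"generic_worker{i}.yaml", "w") as f:
        yaml.safe_dump(config, f, sort_keys=False)
```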


@@ -1,145 +0,0 @@
# Creating multiple stream writers with a bash script
This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers.
Stream writers require both replication and HTTP listeners.
It also prints out the example lines for Synapse main configuration file.
Remember to route each stream's endpoints directly to the worker associated with it.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
# Start with these replication and http ports.
# The script loop starts with the exact port and then increments it by one.
REP_START_PORT=8034
HTTP_START_PORT=8044
# Stream writer workers to generate. Feel free to add or remove them as you wish.
# Event persister ("events") isn't included here as it does not require its
# own HTTP listener.
STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" )
NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]})
i=0
while [ $i -lt "$NUM_WRITERS" ]
do
cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml
worker_app: synapse.app.generic_worker
worker_name: ${STREAM_WRITERS[$i]}_stream_writer
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: $(expr $REP_START_PORT + $i)
resources:
- names: [replication]
- type: http
port: $(expr $HTTP_START_PORT + $i)
resources:
- names: [client]
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
host: 127.0.0.1
port: $(expr $REP_START_PORT + $i)
"
HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer
"
((i++))
done
cat << EXAMPLECONFIG
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information.
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
$HOMESERVER_YAML_INSTANCE_MAP
stream_writers:
$HOMESERVER_YAML_STREAM_WRITERS
EXAMPLECONFIG
```
Copy the code above and save it to a file, for example ```create_stream_writers.sh```.
Make the script executable by running ```chmod +x create_stream_writers.sh```.
## Run the script to create workers and print out a sample configuration
Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```.
```console
$ ./create_stream_writers.sh
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
presence_stream_writer:
host: 127.0.0.1
port: 8034
typing_stream_writer:
host: 127.0.0.1
port: 8035
receipts_stream_writer:
host: 127.0.0.1
port: 8036
to_device_stream_writer:
host: 127.0.0.1
port: 8037
account_data_stream_writer:
host: 127.0.0.1
port: 8038
stream_writers:
presence: presence_stream_writer
typing: typing_stream_writer
receipts: receipts_stream_writer
to_device: to_device_stream_writer
account_data: account_data_stream_writer
```
Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file.
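Once the workers are running, a quick way to confirm that each stream writer is up is its `/health` endpoint, which Synapse exposes on every HTTP listener. A sketch, using the presence writer's client port from the sample output above:
```console
$ curl -fsS http://127.0.0.1:8044/health
OK
```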
## Write directly to Synapse configuration file
You could also write the output directly to the homeserver's main configuration file. **This, however, is not recommended** as even a small typo (such as replacing `>>` with `>`) can erase the entire ```homeserver.yaml```.
If you do this, back up your original configuration file first:
```console
# Back up homeserver.yaml first
cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak
# Create workers and write output to your homeserver.yaml
./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml
```
@@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0
pip install poetry==1.2.0b1
poetry export \
--extras all \
--extras test \
@@ -61,7 +61,7 @@ dh_virtualenv \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
@@ -78,14 +78,9 @@ case "$DEB_BUILD_OPTIONS" in
cp -r tests "$tmpdir"
# To avoid pulling in the unbuilt Synapse in the local directory
pushd /
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
popd
;;
esac
debian/changelog vendored
@@ -1,204 +1,3 @@
matrix-synapse-py3 (1.70.1) stable; urgency=medium
* New Synapse release 1.70.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 28 Oct 2022 12:10:21 +0100
matrix-synapse-py3 (1.70.0) stable; urgency=medium
* New Synapse release 1.70.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Oct 2022 11:11:50 +0100
matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
* New Synapse release 1.70.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Oct 2022 10:59:47 +0100
matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
* New Synapse release 1.70.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 19 Oct 2022 14:11:57 +0100
matrix-synapse-py3 (1.69.0) stable; urgency=medium
* New Synapse release 1.69.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Oct 2022 11:31:03 +0100
matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
* New Synapse release 1.69.0rc4.
-- Synapse Packaging team <packages@matrix.org> Fri, 14 Oct 2022 15:04:47 +0100
matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
* New Synapse release 1.69.0rc3.
-- Synapse Packaging team <packages@matrix.org> Wed, 12 Oct 2022 13:24:04 +0100
matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
* New Synapse release 1.69.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 06 Oct 2022 14:45:00 +0100
matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
* The man page for the hash_password script has been updated to reflect
the correct default value of 'bcrypt_rounds'.
* New Synapse release 1.69.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Oct 2022 11:17:16 +0100
matrix-synapse-py3 (1.68.0) stable; urgency=medium
* New Synapse release 1.68.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Sep 2022 12:02:09 +0100
matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
* New Synapse release 1.68.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 23 Sep 2022 09:40:10 +0100
matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
* New Synapse release 1.68.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Sep 2022 11:18:20 +0100
matrix-synapse-py3 (1.67.0) stable; urgency=medium
* New Synapse release 1.67.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Sep 2022 09:19:56 +0100
matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium
[ Erik Johnston ]
* Use stable poetry 1.2.0 version, rather than a prerelease.
[ Synapse Packaging team ]
* New Synapse release 1.67.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Sep 2022 09:01:06 +0100
matrix-synapse-py3 (1.66.0) stable; urgency=medium
* New Synapse release 1.66.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 31 Aug 2022 11:20:17 +0100
matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium
[ Jörg Behrmann ]
* Update debhelper to compatibility level 12.
* Drop the preinst script stopping synapse.
* Allocate a group for the system user.
* Change dpkg-statoverride to --force-statoverride-add.
[ Erik Johnston ]
* Disable `dh_auto_configure` as it broke during Rust build.
-- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100
matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium
* New Synapse release 1.66.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Aug 2022 12:25:19 +0100
matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium
* New Synapse release 1.66.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 23 Aug 2022 09:48:55 +0100
matrix-synapse-py3 (1.65.0) stable; urgency=medium
* New Synapse release 1.65.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Aug 2022 16:51:26 +0100
matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium
* New Synapse release 1.65.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 11 Aug 2022 11:38:18 +0100
matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium
* New Synapse release 1.65.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 Aug 2022 11:39:29 +0100
matrix-synapse-py3 (1.64.0) stable; urgency=medium
* New Synapse release 1.64.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Aug 2022 10:32:30 +0100
matrix-synapse-py3 (1.64.0~rc2) stable; urgency=medium
* New Synapse release 1.64.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 29 Jul 2022 12:22:53 +0100
matrix-synapse-py3 (1.64.0~rc1) stable; urgency=medium
* New Synapse release 1.64.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Jul 2022 12:11:49 +0100
matrix-synapse-py3 (1.63.1) stable; urgency=medium
* New Synapse release 1.63.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Jul 2022 13:36:52 +0100
matrix-synapse-py3 (1.63.0) stable; urgency=medium
* Clarify that homeserver server names are included in the data reported
by opt-in server stats reporting (`report_stats` homeserver config option).
* New Synapse release 1.63.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Jul 2022 14:42:24 +0200
matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium
* New Synapse release 1.63.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Jul 2022 11:26:02 +0100
matrix-synapse-py3 (1.62.0) stable; urgency=medium
* New Synapse release 1.62.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Jul 2022 11:14:15 +0100
matrix-synapse-py3 (1.62.0~rc3) stable; urgency=medium
* New Synapse release 1.62.0rc3.
-- Synapse Packaging team <packages@matrix.org> Mon, 04 Jul 2022 16:07:01 +0100
matrix-synapse-py3 (1.62.0~rc2) stable; urgency=medium
* New Synapse release 1.62.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Jul 2022 11:42:41 +0100
matrix-synapse-py3 (1.62.0~rc1) stable; urgency=medium
* New Synapse release 1.62.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jun 2022 16:34:57 +0100
matrix-synapse-py3 (1.61.1) stable; urgency=medium
* New Synapse release 1.61.1.
debian/compat vendored Normal file
@@ -0,0 +1 @@
10
debian/control vendored
@@ -4,7 +4,7 @@ Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
debhelper-compat (= 12),
debhelper (>= 10),
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as a parameter either on the command line or on \fBSTDIN\fR if not supplied\.
.P
It accepts a YAML file which can be used to specify parameters such as the number of rounds for bcrypt and a password_config section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
It accepts a YAML file which can be used to specify parameters such as the number of rounds for bcrypt and a password_config section containing the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"
@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.
It accepts a YAML file which can be used to specify parameters such as the
number of rounds for bcrypt and a password_config section containing the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **12**.
value used for the hashing. By default `bcrypt_rounds` is set to **10**.
The hashed password is written on the `STDOUT`.
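As an illustrative sketch of the invocation described above (the config filename is arbitrary):
```sh
cat > hash_password.config.yaml <<EOF
bcrypt_rounds: 14
password_config:
  pepper: "my-secret-pepper"
EOF
hash_password -c hash_password.config.yaml -p "my-password"
```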
@@ -31,7 +31,7 @@ EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# Whether to report homeserver usage statistics.
# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi
@@ -40,12 +40,12 @@ EOF
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
if ! getent passwd $USER >/dev/null; then
adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER
adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
fi
for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR
dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
fi
done
debian/matrix-synapse-py3.preinst vendored Normal file
@@ -0,0 +1,31 @@
#!/bin/sh -e
# Attempt to undo some of the braindamage caused by
# https://github.com/matrix-org/package-synapse-debian/issues/18.
#
# Due to reasons [1], the old python2 matrix-synapse package will not stop the
# service when the package is uninstalled. Our maintainer scripts will do the
# right thing in terms of ensuring the service is enabled and unmasked, but
# then do a `systemctl start matrix-synapse`, which of course does nothing -
# leaving the old (py2) service running.
#
# There should normally be no reason for the service to be running during our
# preinst, so we assume that if it *is* running, it's due to that situation,
# and stop it.
#
# [1] dh_systemd_start doesn't do anything because it sees that there is an
# init.d script with the same name, so leaves it to dh_installinit.
#
# dh_installinit doesn't do anything because somebody gave it a --no-start
# for unknown reasons.
if [ -x /bin/systemctl ]; then
if /bin/systemctl --quiet is-active -- matrix-synapse; then
echo >&2 "stopping existing matrix-synapse service"
/bin/systemctl stop matrix-synapse || true
fi
fi
#DEBHELPER#
exit 0
debian/matrix-synapse.default vendored Normal file
@@ -0,0 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)
@@ -5,6 +5,7 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
@@ -12,10 +13,5 @@ Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse
# The environment file is not shipped by default anymore and the below directive
# is for backwards compatibility only. Please use your homeserver.yaml if
# possible.
EnvironmentFile=-/etc/default/matrix-synapse
[Install]
WantedBy=multi-user.target
@@ -37,7 +37,7 @@ msgstr ""
#. Type: boolean
#. Description
#: ../templates:2001
msgid "Report homeserver usage statistics?"
msgid "Report anonymous statistics?"
msgstr ""
#. Type: boolean
@@ -45,11 +45,11 @@ msgstr ""
#: ../templates:2001
msgid ""
"Developers of Matrix and Synapse really appreciate helping the project out "
"by reporting homeserver usage statistics from this homeserver. Your "
"homeserver's server name, along with very basic aggregate data (e.g. "
"number of users) will be reported. But it helps track the growth of the "
"Matrix community, and helps in making Matrix a success, as well as to "
"convince other networks that they should peer with Matrix."
"by reporting anonymized usage statistics from this homeserver. Only very "
"basic aggregate data (e.g. number of users) will be reported, but it helps "
"track the growth of the Matrix community, and helps in making Matrix a "
"success, as well as to convince other networks that they should peer with "
"Matrix."
msgstr ""
#. Type: boolean
debian/rules vendored
@@ -6,19 +6,15 @@
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`
override_dh_installsystemd:
dh_installsystemd --name=matrix-synapse
override_dh_systemd_enable:
dh_systemd_enable --name=matrix-synapse
override_dh_installinit:
dh_installinit --name=matrix-synapse
# we don't really want to strip the symbols from our object files.
override_dh_strip:
override_dh_auto_configure:
# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
override_dh_dwz:
# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that
debian/templates vendored
@@ -10,13 +10,12 @@ _Description: Name of the server:
Template: matrix-synapse/report-stats
Type: boolean
Default: false
_Description: Report homeserver usage statistics?
_Description: Report anonymous statistics?
Developers of Matrix and Synapse really appreciate helping the
project out by reporting homeserver usage statistics from this
homeserver. Your homeserver's server name, along with very basic
aggregate data (e.g. number of users) will be reported. But it
helps track the growth of the Matrix community, and helps in
making Matrix a success, as well as to convince other networks
that they should peer with Matrix.
project out by reporting anonymized usage statistics from this
homeserver. Only very basic aggregate data (e.g. number of users)
will be reported, but it helps track the growth of the Matrix
community, and helps in making Matrix a success, as well as to
convince other networks that they should peer with Matrix.
.
Thank you.
@@ -31,9 +31,7 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -42,49 +40,40 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential cargo git libffi-dev libssl-dev \
apt-get update && apt-get install -y git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry==1.2.0"
pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/
# If specified, we won't verify the hashes of dependencies.
# This is only needed if the hashes of dependencies cannot be checked for some
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
# If specified, we won't use the Poetry lockfile.
# Instead, we'll just install what a regular `pip install` would from PyPI.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
touch /synapse/requirements.txt; \
fi
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
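# An illustrative sketch (not part of this file): the test-only build
# arguments above are supplied at image build time, e.g.
#   DOCKER_BUILDKIT=1 docker build \
#     --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 \
#     -t matrixdotorg/synapse -f docker/Dockerfile .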
###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
@@ -94,26 +83,10 @@ RUN \
libxml++2.6-dev \
libxslt1-dev \
openssl \
rustc \
zlib1g-dev \
git \
curl \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
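# An illustrative sketch (not part of this file): on arm64 hosts the build
# might therefore be invoked as
#   docker build --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true \
#     -t matrixdotorg/synapse -f docker/Dockerfile .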
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -125,29 +98,17 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
fi
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 2: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
FROM docker.io/python:${PYTHON_VERSION}-slim
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -157,7 +118,7 @@ LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
apt-get update && apt-get install -y \
curl \
gosu \
libjpeg62-turbo \
@@ -72,7 +72,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
@@ -86,15 +85,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a
@@ -1,66 +1,37 @@
# syntax=docker/dockerfile:1
# Inherit from the official Synapse docker image
FROM matrixdotorg/synapse
ARG SYNAPSE_VERSION=latest
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
redis-server nginx-light
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
FROM debian:bullseye-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
redis-server nginx-light
# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Similarly, a base to copy the redis server from.
#
# The redis docker image has fewer dynamic libraries than the debian package,
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc).
FROM redis:6-bullseye AS redis_base
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# now build the final image, based on the the regular Synapse docker image
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d
# Expose nginx listener port
EXPOSE 8080/tcp
# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx
# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Expose nginx listener port
EXPOSE 8080/tcp
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
@@ -8,23 +8,13 @@ docker images that can be run inside Complement for testing purposes.
Note that running Synapse's unit tests from within the docker image is not supported.
## Using the Complement launch script
## Testing with SQLite and single-process Synapse
`scripts-dev/complement.sh` is a script that will automatically build
and run Synapse against Complement.
Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
> Note that `scripts-dev/complement.sh` is a script that will automatically build
> and run an SQLite-based, single-process Synapse against Complement.
[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
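For instance, a sketch of a typical invocation (assuming a Complement checkout at `../complement`; trailing arguments are passed through to `go test`):
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```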
## Building and running the images manually
Under some circumstances, you may wish to build the images manually.
The instructions below will lead you to doing that.
Note that these images can only be built using [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/),
therefore BuildKit needs to be enabled when calling `docker build`. This can be done by
setting `DOCKER_BUILDKIT=1` in your environment.
The instructions below will set up Complement testing for a single-process,
SQLite-based Synapse deployment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
release of Synapse, instead of your current checkout, you can skip this step. From the
@@ -34,17 +24,12 @@ root of the repository:
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
Next, build the workerised Synapse docker image, which is a layer over the base
image.
This will build an image with the tag `matrixdotorg/synapse`.
Next, build the Synapse image for Complement.
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
Finally, build the multi-purpose image for Complement, which is a layer over the workers image.
```sh
docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement
docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
```
This will build an image with the tag `complement-synapse`, which can be handed to
@@ -52,9 +37,49 @@ Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Ref
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
See [the Complement image README](./complement/README.md) for information about the
expected environment variables.
## Testing with PostgreSQL and single or multi-process Synapse
The above docker image only supports running Synapse with SQLite and in a
single-process topology. The following instructions are used to build a Synapse image for
Complement that supports either single or multi-process topology with a PostgreSQL
database backend.
As with the single-process image, build the base Synapse docker image. If you wish to run
tests with the latest release of Synapse, instead of your current checkout, you can skip
this step. From the root of the repository:
```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
This will build an image with the tag `matrixdotorg/synapse`.
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
Again, from the root of the repository:
```sh
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```
This will build an image with the tag `matrixdotorg/synapse-workers`.
It's worth noting at this point that this image is fully functional, and
can be used for local testing. See instructions for using the container
under
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
below.
Finally, build the Synapse image for Complement, which is based on
`matrixdotorg/synapse-workers`.
```sh
docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
```
This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
how to run the tests, as well as the various available command line flags.
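As a sketch of handing the image to Complement (run from a Complement checkout; the package selector is illustrative):
```sh
COMPLEMENT_BASE_IMAGE=matrixdotorg/complement-synapse-workers go test -v ./tests/...
```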
## Running the Dockerfile-worker image standalone
@@ -88,9 +113,6 @@ docker run -d --name synapse \
...substituting `POSTGRES*` variables for those that match a postgres host you have
available (usually a running postgres docker container).
### Workers
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
use when running the container. All possible worker names are defined by the keys of the
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
@@ -103,11 +125,8 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
(e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
(leaving only the main process).
The container will only be configured to use Redis-based worker mode if there are
workers enabled.
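For instance, a sketch reusing the conventions of the `docker run` example above (substitute your own volume and `POSTGRES*` settings):
```sh
docker run -d --name synapse \
    -v /path/to/synapse:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e SYNAPSE_WORKER_TYPES="synchrotron,federation_sender,media_repository" \
    matrixdotorg/synapse-workers
```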
### Logging
Logs for workers and the main process are logged to stdout and can be viewed with
standard `docker logs` tooling. Worker logs contain their worker name
@@ -117,21 +136,3 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:00,
according to the container's clock. Logging for the main process must still be
configured by modifying the homeserver's log config in your Synapse data volume.
### Application Services
Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
a directory (within the container) will cause the configuration script to scan
that directory for `.yaml`/`.yml` registration files.
Synapse will be configured to load these configuration files.
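A minimal sketch (the host path is illustrative):
```sh
docker run -d --name synapse \
    -v /path/to/appservices:/appservices \
    -e SYNAPSE_AS_REGISTRATION_DIR=/appservices \
    matrixdotorg/synapse-workers
```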
### TLS Termination
Nginx is present in the image to route requests to the appropriate workers,
but it does not serve TLS by default.
You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
TLS certificate and key (respectively), both in PEM (textual) format.
In this case, Nginx will additionally serve using HTTPS on port 8448.
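For example, a sketch in which the certificate and key are mounted into the container (paths are illustrative):
```sh
docker run -d --name synapse \
    -p 8448:8448 \
    -v /path/to/tls:/tls \
    -e SYNAPSE_TLS_CERT=/tls/fullchain.pem \
    -e SYNAPSE_TLS_KEY=/tls/privkey.pem \
    matrixdotorg/synapse-workers
```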
@@ -67,13 +67,6 @@ The following environment variables are supported in `generate` mode:
* `UID`, `GID`: the user id and group id to use for creating the data
directories. If unset, and no user is set via `docker run --user`, defaults
to `991`, `991`.
* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
Defaults to `INFO`.
* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
will log sensitive information such as access tokens.
This should not be needed unless you are a developer attempting to debug something
particularly tricky.
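For example, a `generate`-mode sketch that raises the log level (the path and server name are placeholders):
```sh
docker run -it --rm \
    -v /path/to/data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e SYNAPSE_LOG_LEVEL=DEBUG \
    matrixdotorg/synapse:latest generate
```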
## Postgres
@@ -191,7 +184,7 @@ If you need to build the image from a Synapse checkout, use the following `docke
build` command from the repo's root:
```
DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
You can choose to build a different docker image by changing the value of the `-f` flag to
@@ -241,4 +234,4 @@ healthcheck:
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
@@ -1,56 +1,22 @@
# syntax=docker/dockerfile:1
# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
# A dockerfile which builds an image suitable for testing Synapse under
# complement.
ARG SYNAPSE_VERSION=latest
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
ENV SERVER_NAME=localhost
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
COPY conf/* /conf/
# Configure a password and create a database for Synapse
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
# generate a signing key
RUN generate_signing_key -o /conf/server.signing.key
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
# To do this, we copy the old template out of the way and then include it
# with Jinja2.
RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
WORKDIR /data
WORKDIR /data
EXPOSE 8008 8448
COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
ENTRYPOINT ["/conf/start.sh"]
# Copy the entrypoint
COPY conf/start_for_complement.sh /
# Expose nginx's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start_for_complement.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD curl -fSs http://localhost:8008/health || exit 1
@@ -1,32 +1 @@
# Unified Complement image for Synapse
This is an image for testing Synapse with [the *Complement* integration test suite][complement].
It contains some insecure defaults that are only suitable for testing purposes,
so **please don't use this image for a production server**.
This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory
and can be switched using environment variables between the following configurations:
- Monolithic Synapse with SQLite (default, or `SYNAPSE_COMPLEMENT_DATABASE=sqlite`)
- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`)
- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`)
The image is self-contained; it contains an integrated Postgres, Redis and Nginx.
## How to get Complement to pass the environment variables through
To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv]
variable to configure an environment prefix to pass through, then prefix the above options
with that prefix.
Example:
```
COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
```
Consult `scripts-dev/complement.sh` in the repository root for a real example.
[complement]: https://github.com/matrix-org/complement
[complementEnv]: https://github.com/matrix-org/complement/pull/382
Stuff for building the docker image used for testing under complement.
@@ -0,0 +1,40 @@
# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.
#
# Instructions for building this image from those it depends on is detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers
# Install postgresql
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
\"ALTER USER postgres PASSWORD 'somesecret'; \
CREATE DATABASE synapse \
ENCODING 'UTF8' \
LC_COLLATE='C' \
LC_CTYPE='C' \
template=template0;\" | psql" && pg_ctlcluster 13 main stop
# Modify the shared homeserver config with postgres support, certificate setup
# and the disabling of rate-limiting
COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
WORKDIR /data
COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
# Copy the entrypoint
COPY conf-workers/start-complement-synapse-workers.sh /
# Expose nginx's listener ports
EXPOSE 8008 8448
ENTRYPOINT ["/start-complement-synapse-workers.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
@@ -1,8 +1,5 @@
[program:postgres]
command=/usr/local/bin/prefix-log gosu postgres postgres
# Only start if START_POSTGRES=1
autostart=%(ENV_START_POSTGRES)s
command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
# Lower priority number = starts first
priority=1
@@ -0,0 +1,61 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
appservice, \
pusher"
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then
export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
fi
# Generate a TLS key, then generate a certificate by having Complement's CA sign it
# Note that both the key and certificate are in PEM format (not DER).
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py
@@ -1,19 +1,9 @@
{#
This file extends the default 'shared' configuration file (from the 'synapse-workers'
docker image) with Complement-specific tweaks.
The base configuration is moved out of the default path to `shared-orig.yaml.j2`
in the Complement Dockerfile and below we include that original file.
#}
## Server ##
report_stats: False
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
url_preview_enabled: true
url_preview_ip_range_blacklist: []
## Registration ##
@@ -69,10 +59,6 @@ rc_joins:
per_second: 9999
burst_count: 9999
rc_joins_per_room:
per_second: 9999
burst_count: 9999
rc_3pid_validation:
per_second: 1000
burst_count: 1000
@@ -87,21 +73,13 @@ rc_invites:
federation_rr_transactions_per_room_per_second: 9999
allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
{% if not workers_in_use %}
# client-side support for partial state in /send_join responses
faster_joins: true
{% endif %}
# Enable spaces support
spaces_enabled: true
# Enable jump to date endpoint
msc3030_enabled: true
@@ -110,11 +88,3 @@ server_notices:
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"
# Disable sync cache so that initial `/sync` requests are up-to-date.
caches:
sync_response_cache_duration: 0
{% include "shared-orig.yaml.j2" %}
@@ -0,0 +1,129 @@
## Server ##
server_name: SERVER_NAME
log_config: /conf/log_config.yaml
report_stats: False
signing_key_path: /conf/server.signing.key
trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
## Listeners ##
tls_certificate_path: /conf/server.tls.crt
tls_private_key_path: /conf/server.tls.key
bcrypt_rounds: 4
registration_shared_secret: complement
listeners:
- port: 8448
bind_addresses: ['::']
type: http
tls: true
resources:
- names: [federation]
- port: 8008
bind_addresses: ['::']
type: http
resources:
- names: [client]
## Database ##
database:
name: "sqlite3"
args:
# We avoid /data, as it is a volume and is not transferred when the container is committed,
# which is a fundamental necessity in complement.
database: "/conf/homeserver.db"
## Federation ##
# trust certs signed by the complement CA
federation_custom_ca_list:
- /complement/ca/ca.crt
# unblacklist RFC1918 addresses
ip_range_blacklist: []
# Disable server rate-limiting
rc_federation:
window_size: 1000
sleep_limit: 10
sleep_delay: 500
reject_limit: 99999
concurrent: 3
rc_message:
per_second: 9999
burst_count: 9999
rc_registration:
per_second: 9999
burst_count: 9999
rc_login:
address:
per_second: 9999
burst_count: 9999
account:
per_second: 9999
burst_count: 9999
failed_attempts:
per_second: 9999
burst_count: 9999
rc_admin_redaction:
per_second: 9999
burst_count: 9999
rc_joins:
local:
per_second: 9999
burst_count: 9999
remote:
per_second: 9999
burst_count: 9999
rc_3pid_validation:
per_second: 1000
burst_count: 1000
rc_invites:
per_room:
per_second: 1000
burst_count: 1000
per_user:
per_second: 1000
burst_count: 1000
federation_rr_transactions_per_room_per_second: 9999
## API Configuration ##
# A list of application service config files to use
#
app_service_config_files:
AS_REGISTRATION_FILES
## Experimental Features ##
experimental_features:
# Enable spaces support
spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable jump to date endpoint
msc3030_enabled: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"
system_mxid_avatar_url: ""
room_name: "Server Alert"
@@ -0,0 +1,24 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
# log to stdout, for easier use with 'docker logs'
stream: 'ext://sys.stdout'
root:
level: INFO
handlers: [console]
disable_existing_loggers: false
docker/complement/conf/start.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/sh
set -e
sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
# Add the application service registration files to the homeserver.yaml config
for filename in /complement/appservice/*.yaml; do
[ -f "$filename" ] || break
as_id=$(basename "$filename" .yaml)
# Insert the path to the registration file and the AS_REGISTRATION_FILES marker after
# so we can add the next application service in the next iteration of this for loop
sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
done
# Remove the AS_REGISTRATION_FILES entry
sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
# generate an ssl key and cert for the server, signed by the complement CA
openssl genrsa -out /conf/server.tls.key 2048
openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}"
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt
exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"
@@ -1,110 +0,0 @@
#!/bin/bash
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
}
# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}
# No need to report stats here
export SYNAPSE_REPORT_STATS=no
case "$SYNAPSE_COMPLEMENT_DATABASE" in
postgres)
# Set postgres authentication details which will be placed in the homeserver config file
export POSTGRES_PASSWORD=somesecret
export POSTGRES_USER=postgres
export POSTGRES_HOST=localhost
# configure supervisord to start postgres
export START_POSTGRES=true
;;
sqlite|"")
# Configure supervisord not to start Postgres, as we don't need it
export START_POSTGRES=false
;;
*)
echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2
exit 1
;;
esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
user_dir, \
media_repository, \
federation_inbound, \
federation_reader, \
federation_sender, \
synchrotron, \
client_reader, \
appservice, \
pusher"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
# Empty string here means 'main process only'
export SYNAPSE_WORKER_TYPES=""
fi
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then
export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
fi
# Generate a TLS key, then generate a certificate by having Complement's CA sign it
# Note that both the key and certificate are in PEM format (not DER).
# First generate a configuration file to set up a Subject Alternative Name.
cat > /conf/server.tls.conf <<EOF
.include /etc/ssl/openssl.cnf
[SAN]
subjectAltName=DNS:${SERVER_NAME}
EOF
# Generate an RSA key
openssl genrsa -out /conf/server.tls.key 2048
# Generate a certificate signing request
openssl req -new -config /conf/server.tls.conf -key /conf/server.tls.key -out /conf/server.tls.csr \
-subj "/CN=${SERVER_NAME}" -reqexts SAN
# Make the Complement Certificate Authority sign and generate a certificate.
openssl x509 -req -in /conf/server.tls.csr \
-CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
# Assert that we have a Subject Alternative Name in the certificate.
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py
@@ -3,10 +3,8 @@
# configure_workers_and_start.py uses and amends to this file depending on the workers
# that have been selected.
{% if enable_redis %}
redis:
enabled: true
{% endif %}
{% if appservice_registrations is not none %}
## Application Services ##
@@ -19,7 +19,7 @@ username=www-data
autorestart=true
[program:redis]
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -28,6 +28,17 @@ stderr_logfile_maxbytes=0
username=redis
autorestart=true
# Redis can be disabled if the image is being used without workers
autostart={{ enable_redis }}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
# Additional process blocks
{{ worker_config }}
@@ -1,52 +0,0 @@
{% if use_forking_launcher %}
[program:synapse_fork]
command=/usr/local/bin/python -m synapse.app.complement_fork_starter
{{ main_config_path }}
synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
{%- for worker in workers %}
-- {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
--config-path=/conf/workers/{{ worker.name }}.yaml
{%- endfor %}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
{% else %}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
--config-path=/conf/workers/{{ worker.name }}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
{% endfor %}
{% endif %}
@@ -2,11 +2,7 @@ version: 1
formatters:
precise:
{% if include_worker_name_in_log_line %}
format: '{{ worker_name }} | %(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}
handlers:
{% if LOG_FILE_PATH %}
@@ -49,17 +45,11 @@ handlers:
class: logging.StreamHandler
formatter: precise
{% if not SYNAPSE_LOG_SENSITIVE %}
{#
If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers:
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
{% endif %}
root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
@@ -26,27 +26,19 @@
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
# Nginx will be configured to serve TLS on port 8448.
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
# only intended for usage in Complement at the moment.
# No stability guarantees are provided.
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
# log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
# continue to work if so.
import os
import platform
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
import jinja2
import yaml
from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
@@ -60,12 +52,12 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"user_dir": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.user_dir",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
"shared_extra_conf": {"update_user_directory": False},
"worker_extra_conf": "",
},
"media_repository": {
@@ -86,7 +78,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
"shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -108,34 +100,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"client_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client"],
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
"^/_matrix/client/v1/rooms/.*/hierarchy$",
"^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
"^/_matrix/client/v1/rooms/.*/threads$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
@@ -212,6 +176,21 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
}
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
"""
NGINX_LOCATION_CONFIG_BLOCK = """
location ~* {endpoint} {{
proxy_pass {upstream};
@@ -230,19 +209,24 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
"""Log something to the stdout.
Args:
txt: The text to log.
"""
print(txt)
def error(txt: str) -> NoReturn:
print(txt, file=sys.stderr)
"""Log something and exit with an error code.
Args:
txt: The text to log in error.
"""
log(txt)
sys.exit(2)
def flush_buffers() -> None:
sys.stdout.flush()
sys.stderr.flush()
def convert(src: str, dst: str, **template_vars: object) -> None:
"""Generate a file from a template
@@ -252,13 +236,12 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
template_vars: The arguments to replace placeholder variables in the template with.
"""
# Read the template file
# We disable autoescape to prevent template variables from being escaped,
# as we're not using HTML.
env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False)
template = env.get_template(os.path.basename(src))
with open(src) as infile:
template = infile.read()
# Generate a string from the template.
rendered = template.render(**template_vars)
# Generate a string from the template. We disable autoescape to prevent template
# variables from being escaped.
rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
# Write the generated contents to a file
#
@@ -323,7 +306,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in by the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
def generate_worker_files(
@@ -369,10 +352,13 @@ def generate_worker_files(
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# List of dicts that describe workers.
# We pass this to the Supervisor template later to generate the appropriate
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
@@ -392,13 +378,13 @@ def generate_worker_files(
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
if not worker_types_env:
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types_env is None:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma, ignoring whitespace.
worker_types = [x.strip() for x in worker_types_env.split(",")]
# Split type names by comma
worker_types = worker_types_env.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -417,6 +403,8 @@ def generate_worker_files(
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_type = worker_type.strip()
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
@@ -448,7 +436,7 @@ def generate_worker_files(
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
@@ -518,16 +506,12 @@ def generate_worker_files(
if reg_path.suffix.lower() in (".yaml", ".yml")
]
workers_in_use = len(worker_types) > 0
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
)
# Nginx config
@@ -546,15 +530,7 @@ def generate_worker_files(
"/conf/supervisord.conf.j2",
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
)
convert(
"/conf/synapse.supervisord.conf.j2",
"/etc/supervisor/conf.d/synapse.conf",
workers=worker_descriptors,
main_config_path=config_path,
use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
worker_config=supervisord_config,
)
# healthcheck config
@@ -578,25 +554,18 @@ def generate_worker_log_config(
Returns: the path to the generated file
"""
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args: Dict[str, Optional[str]] = {}
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"
extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
"SYNAPSE_LOG_SENSITIVE"
)
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = f"/conf/workers/{worker_name}.log.config"
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
include_worker_name_in_log_line=environ.get(
"SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
),
)
return log_config_filepath
@@ -626,24 +595,14 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
with open(mark_filepath, "w") as f:
f.write("")
# Lifted right out of start.py
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log("Could not find %s, will not use" % (jemallocpath,))
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
flush_buffers()
os.execle(
os.execl(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
environ,
)

View File

@@ -13,19 +13,14 @@ import jinja2
# Utility functions
def log(txt: str) -> None:
print(txt)
print(txt, file=sys.stderr)
def error(txt: str) -> NoReturn:
print(txt, file=sys.stderr)
log(txt)
sys.exit(2)
def flush_buffers() -> None:
sys.stdout.flush()
sys.stderr.flush()
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -115,11 +110,7 @@ def generate_config_from_template(
log_config_file = environ["SYNAPSE_LOG_CONFIG"]
log("Generating log config file " + log_config_file)
convert(
"/conf/log.config",
log_config_file,
{**environ, "include_worker_name_in_log_line": False},
)
convert("/conf/log.config", log_config_file, environ)
# Hopefully we already have a signing key, but generate one if not.
args = [
@@ -136,10 +127,10 @@ def generate_config_from_template(
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
subprocess.run(["chown", "-R", ownership, "/data"], check=True)
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
subprocess.run(args, check=True)
subprocess.check_output(args)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -163,7 +154,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
subprocess.run(["chown", ownership, data_dir], check=True)
subprocess.check_output(["chown", ownership, data_dir])
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -190,7 +181,6 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
flush_buffers()
os.execv(sys.executable, args)
@@ -273,10 +263,8 @@ running with 'migrate_config'. See the README for more details.
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
flush_buffers()
os.execve("/usr/sbin/gosu", args, environ)
else:
flush_buffers()
os.execve(sys.executable, args, environ)

View File

@@ -1,12 +1,26 @@
# This file is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse. You can find a
# complete accounting of possible configuration options at
# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
# helping admins keep track of new options and other changes, and compare
# their configs with the current default. As such, many of the actual
# config values shown are placeholders.
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in
# https://matrix-org.github.io/synapse/latest/setup/installation.html.
#
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
# s = second
# m = minute
# h = hour
# d = day
# w = week
# y = year
# For example, setting redaction_retention_period: 5m would remove redacted
# messages from the database after 5 minutes, rather than 5 months.
################################################################################

View File

@@ -35,6 +35,7 @@
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
- [Consent Tracking](consent_tracking.md)
- [URL Previews](development/url_previews.md)
- [User Directory](user_directory.md)
- [Message Retention Policies](message_retention_policies.md)
- [Pluggable Modules](modules/index.md)
@@ -54,6 +55,7 @@
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
@@ -68,8 +70,6 @@
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
- [Monthly Active Users](usage/administration/monthly_active_users.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
@@ -81,7 +81,6 @@
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Reviewing Code](development/reviews.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
@@ -89,7 +88,6 @@
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
- [Dependency management](development/dependencies.md)
- [Synapse Architecture]()
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)

View File

@@ -0,0 +1,14 @@
# Delete a local group
This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```
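
For illustration, the call can be made with any HTTP client. Below is a minimal Python sketch; the homeserver URL, group ID and token are placeholders, not real values:

```python
# Hedged sketch: delete a local group via the admin API.
import requests

homeserver = "https://matrix.example.com"   # placeholder base URL
group_id = "+example-group:example.com"     # placeholder group ID
token = "<admin_access_token>"              # placeholder admin access token

resp = requests.post(
    f"{homeserver}/_synapse/admin/v1/delete_group/{group_id}",
    headers={"Authorization": f"Bearer {token}"},
)
resp.raise_for_status()
```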

View File

@@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API:
@@ -46,24 +46,7 @@ As an example:
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs.
Here is an easy way to generate the HMAC digest if you have Bash and OpenSSL:
```bash
# Update these values and then paste this code block into a bash terminal
nonce='thisisanonce'
username='pepper_roni'
password='pizza'
admin='admin'
secret='shared_secret'
printf '%s\0%s\0%s\0%s' "$nonce" "$username" "$password" "$admin" |
openssl sha1 -hmac "$secret" |
awk '{print $2}'
```
For an example of generation in Python:
each separated by NULs. For an example of generation in Python:
```python
import hmac, hashlib
@@ -87,4 +70,4 @@ def generate_mac(nonce, user, password, admin=False, user_type=None):
mac.update(user_type.encode('utf8'))
return mac.hexdigest()
```
```
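
Pulling the algorithm description above together, a self-contained version of that snippet might look like the following (using the same illustrative values as the Bash example):

```python
# Sketch of the MAC computation described above: HMAC-SHA1 keyed with the
# shared secret, over nonce, user, password, "admin"/"notadmin" and an
# optional user_type, each separated by NUL bytes.
import hashlib
import hmac
from typing import Optional

def generate_mac(shared_secret: str, nonce: str, user: str, password: str,
                 admin: bool = False, user_type: Optional[str] = None) -> str:
    mac = hmac.new(shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(user.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    if user_type:
        mac.update(b"\x00")
        mac.update(user_type.encode("utf8"))
    return mac.hexdigest()

print(generate_mac("shared_secret", "thisisanonce", "pepper_roni", "pizza", admin=True))
```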

View File

@@ -59,7 +59,6 @@ The following fields are possible in the JSON response body:
- `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
- `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
- `state_events` - Total number of state_events of a room. Complexity of the room.
- `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. If the room does not define a type, the value will be `null`.
* `offset` - The current pagination offset in rooms. This parameter should be
used instead of `next_token` for room offset as `next_token` is
not intended to be parsed.
@@ -102,8 +101,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534,
"room_type": "m.space"
"state_events": 93534
},
... (8 hidden items) ...
{
@@ -120,8 +118,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345,
"room_type": null
"state_events": 8345
}
],
"offset": 0,
@@ -154,8 +151,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8,
"room_type": null
"state_events": 8
}
],
"offset": 0,
@@ -188,8 +184,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534,
"room_type": null
"state_events": 93534
},
... (98 hidden items) ...
{
@@ -206,8 +201,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345,
"room_type": "m.space"
"state_events": 8345
}
],
"offset": 0,
@@ -244,9 +238,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534,
"room_type": "m.space"
"state_events": 93534
},
... (48 hidden items) ...
{
@@ -263,9 +255,7 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345,
"room_type": null
"state_events": 8345
}
],
"offset": 100,
@@ -300,10 +290,6 @@ The following fields are possible in the JSON response body:
* `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.
* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space.
If the room does not define a type, the value will be `null`.
* `forgotten` - Whether all local users have
[forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room.
The API is:
@@ -331,14 +317,10 @@ A response body like the following is returned:
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534,
"room_type": "m.space",
"forgotten": false
"state_events": 93534
}
```
_Changed in Synapse 1.66:_ Added the `forgotten` key to the response body.
# Room Members API
The Room Members admin API allows server admins to get a list of all members of a room.
@@ -393,151 +375,6 @@ A response body like the following is returned:
}
```
# Room Messages API
The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```
**Parameters**
The following path parameters are required:
* `room_id` - The ID of the room you wish to fetch messages from.
The following query parameters are available:
* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
this value to `b` will reverse the above sort order. Defaults to `f`.
**Response**
The following fields are possible in the JSON response body:
* `chunk` - A list of room events. The order depends on the dir parameter.
Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.
**Example**
For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
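
A request could be issued as in this sketch (placeholder homeserver, room ID and tokens; using Python's `requests`):

```python
# Hedged sketch: fetch pages of room messages via the admin API, paginating
# until the response no longer includes an "end" token.
import requests

homeserver = "https://matrix.example.com"       # placeholder
room_id = "!636q39766251:example.com"           # placeholder
headers = {"Authorization": "Bearer <admin_access_token>"}

params = {"from": "<from_token>", "limit": 10, "dir": "f"}
while True:
    resp = requests.get(
        f"{homeserver}/_synapse/admin/v1/rooms/{room_id}/messages",
        params=params, headers=headers,
    )
    resp.raise_for_status()
    page = resp.json()
    for event in page["chunk"]:
        print(event["event_id"], event["type"])
    if "end" not in page:
        break                      # no further events are available
    params["from"] = page["end"]   # continue from the end token
```

A response body like the following is returned: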
```json
{
"chunk": [
{
"content": {
"body": "This is an example text message",
"format": "org.matrix.custom.html",
"formatted_body": "<b>This is an example text message</b>",
"msgtype": "m.text"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"type": "m.room.message",
"unsigned": {
"age": 1234
}
},
{
"content": {
"name": "The room name"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"state_key": "",
"type": "m.room.name",
"unsigned": {
"age": 1234
}
},
{
"content": {
"body": "Gangnam Style",
"info": {
"duration": 2140786,
"h": 320,
"mimetype": "video/mp4",
"size": 1563685,
"thumbnail_info": {
"h": 300,
"mimetype": "image/jpeg",
"size": 46144,
"w": 300
},
"thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
"w": 480
},
"msgtype": "m.video",
"url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
},
"event_id": "$143273582443PhrSn:example.org",
"origin_server_ts": 1432735824653,
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"type": "m.room.message",
"unsigned": {
"age": 1234
}
}
],
"end": "t47409-4357353_219380_26003_2265",
"start": "t47429-4392820_219380_26003_2265"
}
```
# Room Timestamp to Event API
The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).
Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```
**Parameters**
The following path parameters are required:
* `room_id` - The ID of the room you wish to check.
The following query parameters are available:
* `ts` - a timestamp in milliseconds where we will find the closest event in
the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp. Defaults to `f`.
**Response**
* `event_id` - The ID of the event closest to the given timestamp.
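
For example, the endpoint could be queried as follows (a sketch with placeholder values):

```python
# Hedged sketch: find the event closest to a timestamp, searching forwards.
import requests

resp = requests.get(
    "https://matrix.example.com/_synapse/admin/v1/rooms/"
    "!636q39766251:example.com/timestamp_to_event",     # placeholder room
    params={"ts": 1432735824653, "dir": "f"},
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
print(resp.json()["event_id"])
```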
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

View File

@@ -42,7 +42,6 @@ It returns a JSON body like the following:
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
"consent_ts": null,
"external_ids": [
{
"auth_provider": "<provider1>",
@@ -125,8 +124,9 @@ Body parameters:
- `address` - string. Value of third-party ID.
belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). Details in the configuration manual under the
sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
provider for SSO (Single sign-on). Details in
[Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
sections `sso` and `oidc_providers`.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided
value is not in the homeserver configuration.
@@ -365,7 +365,6 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)
## Reset password
@@ -546,7 +545,7 @@ Gets a list of all local media that a specific `user_id` has created.
These are media that the user has uploaded themselves
([local media](../media_repository.md#local-media)), as well as
[URL preview images](../media_repository.md#url-previews) requested by the user if the
[feature is enabled](../usage/configuration/config_documentation.md#url_preview_enabled).
[feature is enabled](../development/url_previews.md).
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
@@ -755,7 +754,6 @@ A response body like the following is returned:
"device_id": "QBUAZIFURK",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
},
@@ -763,7 +761,6 @@ A response body like the following is returned:
"device_id": "AUIECTSRND",
"display_name": "ios",
"last_seen_ip": "1.2.3.5",
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775025,
"user_id": "<user_id>"
}
@@ -790,8 +787,6 @@ The following fields are returned in the JSON response body:
Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
(May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -843,7 +838,6 @@ A response body like the following is returned:
"device_id": "<device_id>",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
}
@@ -865,8 +859,6 @@ The following fields are returned in the JSON response body:
Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
(May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -1155,41 +1147,3 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
### Find a user based on their ID in an auth provider
The API is:
```
GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
```
When a user matches the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
```json
{
"user_id": "@hello:example.org"
}
```
**Parameters**
The following parameters should be set in the URL:
- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.
The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
**Errors**
Returns a `404` HTTP status code if no user was found, with a response body like this:
```json
{
"errcode":"M_NOT_FOUND",
"error":"User not found"
}
```
_Added in Synapse 1.68.0._

View File

@@ -34,45 +34,13 @@ the process of indexing it).
## Chain Cover Index
Synapse computes auth chain differences by pre-computing a "chain cover" index
for the auth chain in a room, allowing us to efficiently make reachability queries
like "is event `A` in the auth chain of event `B`?". We could do this with an index
that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this
would be prohibitively large, scaling poorly as the room accumulates more state
events.
for the auth chain in a room, allowing efficient reachability queries like "is
event A in the auth chain of event B". This is done by assigning every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
is in the auth chain of `B`) if and only if either:
Instead, we break down the graph into *chains*. A chain is a subset of a DAG
with the following property: for any pair of events `E` and `F` in the chain,
the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be
linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain
is given a *sequence number* local to that chain. The oldest event `E` in the
chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has
sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has
sequence number 3; and so on.
Synapse ensures that each persisted event belongs to exactly one chain, and
tracks how the chains are connected to one another. This allows us to
efficiently answer reachability queries. Doing so uses less storage than
tracking reachability on an event-by-event basis, particularly when we have
fewer and longer chains. See
> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944).
> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598.
for the original idea or
> Y. Chen, Y. Chen, [An efficient algorithm for answering graph
> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498),
> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008,
> pp. 893–902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).)
for a more modern take.
In practical terms, the chain cover assigns every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links*
between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B`
(i.e. `A` is in the auth chain of `B`) if and only if either:
1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
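
In code, the test reduces to a couple of comparisons. A minimal sketch, assuming events are annotated with `(chain ID, sequence number)` pairs and links are kept in a dict keyed by source and target chain (names and storage layout are illustrative, not Synapse's actual schema):

```python
# Minimal sketch of the chain-cover reachability test described above.
# The link (5,3) -> (2,4) from the text is stored below.
from typing import Dict, List, Tuple

ChainPos = Tuple[int, int]  # (chain_id, seq_no)
# links[(src_chain, dst_chain)] = [(start_seq_no, end_seq_no), ...]
Links = Dict[Tuple[int, int], List[Tuple[int, int]]]

def is_in_auth_chain(a: ChainPos, b: ChainPos, links: Links) -> bool:
    """Return True if event A is in the auth chain of event B."""
    a_chain, a_seq = a
    b_chain, b_seq = b
    # Rule 1: same chain, and A comes strictly earlier.
    if a_chain == b_chain:
        return a_seq < b_seq
    # Rule 2: some link L from B's chain to A's chain satisfies
    # L.start_seq_no <= B.seq_no and A.seq_no <= L.end_seq_no.
    return any(
        start <= b_seq and a_seq <= end
        for start, end in links.get((b_chain, a_chain), [])
    )

links: Links = {(5, 2): [(3, 4)]}               # the link (5,3) -> (2,4)
print(is_in_auth_chain((2, 4), (5, 3), links))  # True
print(is_in_auth_chain((2, 5), (5, 3), links))  # False: beyond the link's end
```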
@@ -81,9 +49,8 @@ There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains. This trades off extra storage
in order to save CPU cycles and DB queries.
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains.
### Example

View File

@@ -70,61 +70,82 @@ on save as they take a while and can be very resource intensive.
- Avoid wildcard imports (`from synapse.types import *`) and
relative imports (`from .types import UserID`).
## Configuration code and documentation format
## Configuration file format
When adding a configuration option to the code, if several settings are grouped into a single dict, ensure that your code
correctly handles the top-level option being set to `None` (as it will be if no sub-options are enabled).
The [configuration manual](usage/configuration/config_documentation.md) acts as a
The [sample configuration file](./sample_config.yaml) acts as a
reference to Synapse's configuration options for server administrators.
Remember that many readers will be unfamiliar with YAML and server
administration in general, so it is important that when you add
a configuration option the documentation be as easy to understand as possible, which
includes following a consistent format.
administration in general, so it is important that the file be as
easy to understand as possible, which includes following a consistent
format.
Some guidelines follow:
- Each option should be listed in the config manual with the following format:
- The name of the option, prefixed by `###`.
- Sections should be separated with a heading consisting of a single
line prefixed and suffixed with `##`. There should be **two** blank
lines before the section header, and **one** after.
- Each option should be listed in the file with the following format:
- A comment describing the setting. Each line of this comment
should be prefixed with a hash (`#`) and a space.
- A comment which describes the default behaviour (i.e. what
The comment should describe the default behaviour (i.e. what
happens if the setting is omitted), as well as what the effect
will be if the setting is changed.
- An example setting, using backticks to define the code block
Often, the comment end with something like "uncomment the
following to <do action>".
- A line consisting of only `#`.
- A commented-out example setting, prefixed with only `#`.
For boolean (on/off) options, convention is that this example
should be the *opposite* to the default. For other options, the example should give
some non-default value which is likely to be useful to the reader.
should be the *opposite* to the default (so the comment will end
with "Uncomment the following to enable [or disable]
<feature>." For other options, the example should give some
non-default value which is likely to be useful to the reader.
- There should be a horizontal rule between each option, which can be achieved by adding `---` before and
after the option.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- There should be a blank line between each option.
- Where several settings are grouped into a single dict, *avoid* the
convention where the whole block is commented out, resulting in
comment lines starting `# #`, as this is hard to read and confusing
to edit. Instead, leave the top-level config option uncommented, and
follow the conventions above for sub-options. Ensure that your code
correctly handles the top-level option being set to `None` (as it
will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
(`` ` ``) to refer to configuration options.
Example:
---
### `modules`
Use the `module` sub-option to add a module under `modules` to extend functionality.
The `module` setting then has a sub-option, `config`, which can be used to define some configuration
for the `module`.
Defaults to none.
Example configuration:
```yaml
modules:
- module: my_super_module.MySuperClass
config:
do_thing: true
- module: my_other_super_module.SomeClass
config: {}
## Frobnication ##
# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true
# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobnicator_frobber: special_frobber
# Settings for the frobber
#
frobber:
# frobbing speed. Defaults to 1.
#
#speed: 10
# frobbing distance. Defaults to 1000.
#
#distance: 100
```
---
Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
Making sure that the output from this script matches the desired format
is left as an exercise for the reader!

View File

@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================
Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.
Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.
Policy
@@ -17,14 +17,6 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).
A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).
The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context
-------
@@ -39,15 +31,3 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.
For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bumps its minimum supported Rust versions frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.
On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.

View File

@@ -28,9 +28,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
# 3. Get the source.
@@ -65,8 +62,6 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Synapse requires Poetry version 1.2.0 or later.
Next, open a terminal and install dependencies as follows:
```sh
@@ -117,11 +112,6 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
When changes are made to any Rust code then you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
is quicker than `poetry install`, so is recommended when making frequent
changes to the Rust code.
# 8. Test, test, test!
<a name="test-test-test"></a>
@@ -167,12 +157,6 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```
You can run unit tests in parallel by specifying `-jX` argument to `trial` where `X` is the number of parallel runners you want. To use 4 cpu cores, you would run them like:
```sh
poetry run trial -j4 tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
@@ -209,7 +193,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```
Note that the database file is cleared at the beginning of each test run. Thus it
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
@@ -320,29 +304,6 @@ To run a specific test, you can specify the whole name structure:
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
```
The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try:
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
You can then use this incantation to format the tests appropriately:
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -json | gotestfmt -hide successful-tests
```
(Remove `-hide successful-tests` if you don't want to hide successful tests.)
### Access database for homeserver after Complement test runs.
@@ -367,7 +328,7 @@ To prepare a Pull Request, please:
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. that's it for now, a non-draft pull request will automatically request review from the team;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.
@@ -390,7 +351,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
formatting, and must end with a full stop (.) or an exclamation mark (!) for
formatting, and should end with a full stop (.) or an exclamation mark (!) for
consistency.
Adding credits to the changelog is encouraged, we value your
@@ -543,13 +504,10 @@ From this point, you should:
1. Look at the results of the CI pipeline.
- If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
- A pull request is a conversation, if you disagree with the suggestions, please respond and discuss it.
3. Create a new commit with the changes.
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
- Push these commits to your Pull Request.
4. Back to 1.
5. Once the pull request is ready for review again please re-request review from whichever developer did your initial
review (or leave a comment in the pull request that you believe all required changes have been done).
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!

View File

@@ -191,28 +191,3 @@ There are three separate aspects to this:
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.
## `event_id` global uniqueness
`event_id`s can be considered globally unique although there has been a lot of
debate on this topic in places like
[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01). There are several places in Synapse
and even in the Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.
When scoping `event_id` in a database schema, it is often nice to accompany it
with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
REFERENCES rooms(room_id)`) which makes flexible lookups easy. For example it
makes it very easy to find and clean up everything in a room when it needs to be
purged (no need to use sub-`select` query or join from the `events` table).
A note on collisions: In room versions `1` and `2` it's possible to end up with
two events with the same `event_id` (in the same or different rooms). After room
version `3`, that can only happen with a hash collision, which we basically hope
will never happen (SHA256 has a massive key space).
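
A toy illustration of that scoping, using an in-memory SQLite database (table names are invented, not Synapse's real schema):

```python
# Toy illustration of scoping event_id by room_id as suggested above.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE rooms (room_id TEXT PRIMARY KEY);
    CREATE TABLE room_events (
        room_id  TEXT NOT NULL REFERENCES rooms(room_id),
        event_id TEXT NOT NULL,
        PRIMARY KEY (room_id, event_id)
    );
""")
conn.execute("INSERT INTO rooms VALUES ('!room:example.com')")
conn.execute(
    "INSERT INTO room_events VALUES ('!room:example.com', '$event1:example.org')"
)
# Purging a room is then a cheap per-room delete, with no need for a
# sub-select or a join from the events table:
conn.execute("DELETE FROM room_events WHERE room_id = '!room:example.com'")
```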

View File

@@ -126,23 +126,6 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```
## ...delete everything and start over from scratch?
```shell
# Stop the current virtualenv if active
$ deactivate
# Remove all of the files from the current environment.
# Don't worry, even though it says "all", this will only
# remove the Poetry virtualenvs for the current project.
$ poetry env remove --all
# Reactivate Poetry shell to create the virtualenv again
$ poetry shell
# Install everything again
$ poetry install --extras all
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
@@ -254,35 +237,3 @@ poetry run pip install build && poetry run python -m build
because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.
# Troubleshooting
## Check the version of poetry with `poetry --version`.
The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
poetry-core`.
## Clear caches: `poetry cache clear --all pypi`.
Poetry caches a bunch of information about packages that isn't readily available
from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
## Remove outdated egg-info
Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
install.
This stores some cached information about dependencies and often conflicts with
letting Poetry do the right thing.
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.

View File

@@ -1,41 +0,0 @@
Some notes on how we do reviews
===============================
The Synapse team works off a shared review queue -- any new pull requests for
Synapse (or related projects) have a review requested from the entire team. Team
members should process this queue using the following rules:
* Any high urgency pull requests (e.g. fixes for broken continuous integration
or fixes for release blockers);
* Follow-up reviews for pull requests which have previously received reviews;
* Any remaining pull requests.
For the latter two categories above, older pull requests should be prioritised.
It is explicit that there is no priority given to pull requests from the team
(vs from the community). If a pull request requires a quick turn around, please
explicitly communicate this via [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)
or as a comment on the pull request.
Once an initial review has been completed and the author has made additional changes,
follow-up reviews should go back to the same reviewer. This helps build a shared
context and conversation between author and reviewer.
As a team we aim to keep the number of inflight pull requests to a minimum to ensure
that ongoing work is finished before starting new work.
Performing a review
-------------------
To communicate to the rest of the team the status of each pull request, team
members should do the following:
* Assign themselves to the pull request (they should be left assigned to the
pull request until it is merged, closed, or they are no longer the reviewer);
* Review the pull request by leaving comments, questions, and suggestions;
* Mark the pull request appropriately (as needing changes or accepted).
If you are unsure about a particular part of the pull request (or are not confident
in your understanding of part of the code) then ask questions or request review
from the team again. When requesting review from the team be sure to leave a comment
with the rationale on why you're putting it back in the queue.

View File

@@ -0,0 +1,61 @@
URL Previews
============
The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix
specific additions).
This does have trade-offs compared to other designs:
* Pros:
* Simple and flexible; can be used by any clients at any point
* Cons:
* If each homeserver provides one of these independently, all the HSes in a
room may needlessly DoS the target URI
* The URL metadata must be stored somewhere, rather than just using Matrix
itself to store the media.
* Matrix cannot be used to distribute the metadata between homeservers.
When Synapse is asked to preview a URL it does the following:
1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
config).
2. Checks the in-memory cache by URLs and returns the result if it exists. (This
is also used to de-duplicate processing of multiple in-flight requests at once.)
3. Kicks off a background process to generate a preview:
1. Checks the database cache by URL and timestamp and returns the result if it
has not expired and was successful (a 2xx return code).
2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
does, updates the URL to download.
3. Downloads the URL and stores it into a file via the media storage provider
and saves the local media metadata.
4. If the media is an image:
1. Generates thumbnails.
2. Generates an Open Graph response based on image properties.
5. If the media is HTML:
1. Decodes the HTML via the stored file.
2. Generates an Open Graph response from the HTML.
3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
1. Downloads the URL and stores it into a file via the media storage provider
and saves the local media metadata.
2. Converts the oEmbed response to an Open Graph response.
3. Overrides any Open Graph data from the HTML with data from oEmbed.
4. If an image exists in the Open Graph response:
1. Downloads the URL and stores it into a file via the media storage
provider and saves the local media metadata.
2. Generates thumbnails.
3. Updates the Open Graph response based on image properties.
6. If the media is JSON and an oEmbed URL was found:
1. Converts the oEmbed response to an Open Graph response.
2. If a thumbnail or image is in the oEmbed response:
1. Downloads the URL and stores it into a file via the media storage
provider and saves the local media metadata.
2. Generates thumbnails.
3. Updates the Open Graph response based on image properties.
7. Stores the result in the database cache.
4. Returns the result.
The in-memory cache expires after 1 hour.
Expired entries in the database cache (and their associated media files) are
deleted every 10 seconds. The default expiration time is 1 hour from download.
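
The two cache tiers can be sketched roughly as follows (all names are invented for illustration; this is a simplification, not Synapse's implementation):

```python
# Rough sketch of the two cache tiers described above.
import time

IN_MEMORY_TTL = 60 * 60      # in-memory entries expire after 1 hour
DB_CACHE_TTL = 60 * 60       # database entries expire 1 hour from download
memory_cache: dict = {}      # url -> (expires_at, open_graph_response)

def download_and_generate_open_graph(url: str) -> dict:
    # Placeholder for steps 3.2-3.7: download, oEmbed handling, thumbnails.
    return {"og:url": url}

def get_preview(url: str, db_cache: dict) -> dict:
    now = time.time()
    # In-memory cache (in Synapse this also de-duplicates in-flight requests).
    hit = memory_cache.get(url)
    if hit and hit[0] > now:
        return hit[1]
    # Database cache: reuse unexpired, successful (2xx) results.
    row = db_cache.get(url)
    if row and row["expires_at"] > now and 200 <= row["status"] < 300:
        og = row["open_graph"]
    else:
        og = download_and_generate_open_graph(url)
        db_cache[url] = {
            "expires_at": now + DB_CACHE_TTL, "status": 200, "open_graph": og,
        }
    memory_cache[url] = (now + IN_MEMORY_TTL, og)
    return og

print(get_preview("https://example.com", {}))
```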

View File

@@ -37,26 +37,27 @@ As with other login types, there are additional fields (e.g. `device_id` and
## Preparing Synapse
The JSON Web Token integration in Synapse uses the
[`Authlib`](https://docs.authlib.org/en/latest/index.html) library, which must be installed
[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed
as follows:
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* The relevant libraries are included in the Docker images and Debian packages
provided by `matrix.org` so no further action is needed.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[jwt]` to install the necessary dependencies.
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
install synapse[pyjwt]` to install the necessary dependencies.
* For other installation mechanisms, see the documentation provided by the
maintainer.
* For other installation mechanisms, see the documentation provided by the
maintainer.
To enable the JSON web token integration, you should then add a `jwt_config` option
to your configuration file. See the [configuration manual](usage/configuration/config_documentation.md#jwt_config) for some
To enable the JSON web token integration, you should then add a `jwt_config` section
to your configuration file (or uncomment the `enabled: true` line in the
existing section). See [sample_config.yaml](./sample_config.yaml) for some
sample settings.
## How to test JWT as a developer
Although JSON Web Tokens are typically generated from an external server, the
example below uses a locally generated JWT.
examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly.
1. Configure Synapse with JWT logins; note that this example uses a pre-shared
secret and an algorithm of HS256:
@@ -69,21 +70,10 @@ example below uses a locally generated JWT.
```
2. Generate a JSON web token:
You can use the following short Python snippet to generate a JWT
protected by an HMAC.
Take care that the `secret` and the algorithm given in the `header` match
the entries from `jwt_config` above.
```python
from authlib.jose import jwt

header = {"alg": "HS256"}
payload = {"sub": "user1", "aud": ["audience"]}
secret = "my-secret-token"
result = jwt.encode(header, payload, secret)
print(result.decode("ascii"))
```

```bash
$ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc
```
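The same token can also be generated from Python; a minimal sketch assuming the `PyJWT` package (the library behind the `pyjwt` CLI above) is installed:

```python
import jwt  # the PyJWT package

# The key and algorithm must match the `jwt_config` entries above.
token = jwt.encode({"sub": "test-user"}, "my-secret-token", algorithm="HS256")
print(token)  # PyJWT >= 2 returns a str; earlier versions return bytes
```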
3. Query for the login types and ensure `org.matrix.login.jwt` is there:
```bash

View File

@@ -13,10 +13,8 @@ environments where untrusted users have shell access.
## Configuring the manhole
To enable it, first add the `manhole` listener configuration in your
`homeserver.yaml`. You can find information on how to do that
in the [configuration manual](usage/configuration/config_documentation.md#manhole_settings).
The configuration is slightly different if you're using docker.
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`. The configuration is slightly different if you're using docker.
#### Docker config

View File

@@ -7,7 +7,8 @@ The media repository
users.
* caches avatars, attachments and their thumbnails for media uploaded by remote
users.
* caches resources and thumbnails used for URL previews.
* caches resources and thumbnails used for
[URL previews](development/url_previews.md).
All media in Matrix can be identified by a unique
[MXC URI](https://spec.matrix.org/latest/client-server-api/#matrix-content-mxc-uris),
@@ -58,6 +59,8 @@ remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg
Note that `remote_thumbnail/` does not have an `s`.
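A short, hedged sketch of how such a thumbnail path is assembled; the `id[0:2]/id[2:4]/id[4:]` split of the media ID is an assumption inferred from the example path above, not a documented guarantee:

```python
def remote_thumbnail_path(server: str, media_id: str,
                          width: int, height: int, content_type: str) -> str:
    # Assumed layout: first two characters, next two, then the remainder.
    top, sub, rest = media_id[0:2], media_id[2:4], media_id[4:]
    file_name = f"{width}-{height}-{content_type.replace('/', '-')}"
    return f"remote_thumbnail/{server}/{top}/{sub}/{rest}/{file_name}"

print(remote_thumbnail_path("matrix.org", "aabbcccccccccccccccccccc",
                            128, 96, "image/jpeg"))
# remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg
```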
## URL Previews
See [URL Previews](development/url_previews.md) for documentation on the URL preview
process.
When generating previews for URLs, Synapse may download and cache various
resources, including images. These resources are assigned temporary media IDs

View File

@@ -8,8 +8,7 @@ and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental. There are known bugs which may cause database corruption.
Proceed with caution.**
experimental.**
A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after
@@ -50,9 +49,9 @@ clients.
## Server configuration
Support for this feature can be enabled and configured by adding the
`retention` option in the Synapse configuration file (see
[configuration manual](usage/configuration/config_documentation.md#retention)).
Support for this feature can be enabled and configured in the
`retention` section of the Synapse configuration file (see the
[sample file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518)).
To enable support for message retention policies, set the setting
`enabled` in this section to `true`.
@@ -66,8 +65,8 @@ message retention policy configured in its state. This allows server
admins to ensure that messages are never kept indefinitely in a server's
database.
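Concretely, an event's eligibility for purging reduces to a timestamp comparison; a minimal sketch, assuming `max_lifetime` and event timestamps are both expressed in milliseconds:

```python
def is_expired(origin_server_ts_ms: int, max_lifetime_ms: int, now_ms: int) -> bool:
    # An event becomes eligible for purging once its age exceeds max_lifetime.
    return now_ms - origin_server_ts_ms > max_lifetime_ms

# Example: under a 1-day max_lifetime, a 2-day-old event is purgeable.
print(is_expired(0, 86_400_000, 172_800_000))  # True
```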
A default policy can be defined as such, by adding the `retention` option in
the configuration file and adding these sub-options:
A default policy can be defined as such, in the `retention` section of
the configuration file:
```yaml
default_policy:
@@ -87,8 +86,8 @@ Purge jobs are the jobs that Synapse runs in the background to purge
expired events from the database. They are only run if support for
message retention policies is enabled in the server's configuration. If
no configuration for purge jobs is provided by the server admin,
Synapse will use a default configuration, which is described here in the
[configuration manual](usage/configuration/config_documentation.md#retention).
Synapse will use a default configuration, which is described in the
[sample configuration file](https://github.com/matrix-org/synapse/blob/v1.36.0/docs/sample_config.yaml#L451-L518).
Some server admins might want finer control over when events are removed,
depending on an event's room's policy. This can be done by setting the
@@ -138,8 +137,8 @@ the server's database.
### Lifetime limits
Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined under the
`retention` option in the configuration file:
purging old events in a room. These limits can be defined as such in the
`retention` section of the configuration file:
```yaml
allowed_lifetime_min: 1d

View File

@@ -7,30 +7,17 @@
1. Enable Synapse metrics:
In `homeserver.yaml`, make sure `enable_metrics` is
set to `True`.
1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
collect data:
There are two methods of enabling the metrics endpoint in Synapse.
There are two methods of enabling metrics in Synapse.
The first serves the metrics as a part of the usual web server and
can be enabled by adding the `metrics` resource to the existing
listener, as in this example:
can be enabled by adding the "metrics" resource to the existing
listener as such:
```yaml
listeners:
  - port: 8008
    tls: false
    type: http
    x_forwarded: true
    bind_addresses: ['::1', '127.0.0.1']
    resources:
      # added "metrics" in this line
      - names: [client, federation, metrics]
        compress: false
    resources:
      - names:
          - client
          - metrics
```
This provides a simple way of adding metrics to your Synapse
@@ -44,26 +31,19 @@
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
Add a new listener to homeserver.yaml as in this example:
Add a new listener to homeserver.yaml:
```yaml
listeners:
  - port: 8008
    tls: false
    type: http
    x_forwarded: true
    bind_addresses: ['::1', '127.0.0.1']
    resources:
      - names: [client, federation]
        compress: false

  # beginning of the new metrics listener
  - port: 9000
    type: metrics
    bind_addresses: ['::1', '127.0.0.1']

listeners:
  - type: metrics
    port: 9000
    bind_addresses:
      - '0.0.0.0'
```
For both options, you will need to ensure that `enable_metrics` is
set to `True`.
1. Restart Synapse.
1. Add a Prometheus target for Synapse.
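Either way, the endpoint can be sanity-checked directly before relying on Prometheus; a minimal sketch, assuming the standalone metrics listener on port 9000 shown above and the third-party `requests` package:

```python
import requests

# The dedicated metrics listener speaks plain HTTP on the configured port.
resp = requests.get("http://127.0.0.1:9000/_synapse/metrics", timeout=5)
resp.raise_for_status()

# Prometheus text exposition format: one "name{labels} value" sample per line.
for line in resp.text.splitlines()[:10]:
    print(line)
```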
@@ -152,8 +132,6 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
**The old names will be disabled by default in Synapse v1.71.0 and removed
altogether in Synapse v1.73.0.**
| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -165,13 +143,6 @@ altogether in Synapse v1.73.0.**
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
| synapse_util_caches_cache | synapse_util_caches_cache:total |
| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -287,7 +258,7 @@ Standard Metric Names
As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
the units have been changed to seconds, from milliseconds.
the units have been changed to seconds, from miliseconds.
| New name | Old name |
| ---------------------------------------- | --------------------------------- |

View File

@@ -263,7 +263,7 @@ class MyAuthProvider:
            return None

        if self.credentials.get(username) == login_dict.get("my_field"):
            return (self.api.get_qualified_user_id(username), None)
            return self.api.get_qualified_user_id(username)

    async def check_pass(
        self,
@@ -280,5 +280,5 @@ class MyAuthProvider:
            return None

        if self.credentials.get(username) == login_dict.get("password"):
            return (self.api.get_qualified_user_id(username), None)
            return self.api.get_qualified_user_id(username)
```
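Pieced together, the fragments above have roughly the following shape. This is a hedged, self-contained sketch rather than a drop-in module: the constructor, the config layout, and the `login_type` value are assumptions:

```python
from typing import Any, Dict, Optional, Tuple

class MyAuthProvider:
    def __init__(self, config: dict, api: Any):
        self.api = api
        # Assumed config shape: a mapping of username -> expected credential.
        self.credentials: Dict[str, str] = config.get("credentials", {})

    async def check_my_login(
        self, username: str, login_type: str, login_dict: dict
    ) -> Optional[Tuple[str, None]]:
        if login_type != "my.login_type":  # assumed custom login type
            return None
        if self.credentials.get(username) == login_dict.get("my_field"):
            # Success: a (qualified user ID, optional callback) tuple.
            return (self.api.get_qualified_user_id(username), None)
        return None
```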

View File

@@ -38,13 +38,15 @@ this callback.
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_join_room(user: str, room: str, is_invited: bool) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
```
Called when a user is trying to join a room. The user is represented by their Matrix user ID (e.g.
Called when a user is trying to join a room. The module must return a `bool` to indicate
whether the user can join the room. Return `False` to prevent the user from joining the
room; otherwise return `True` to permit the joining.
The user is represented by their Matrix user ID (e.g.
`@alice:example.com`) and the room is represented by its Matrix ID (e.g.
`!room:example.com`). The module is also given a boolean to indicate whether the user
currently has a pending invite in the room.
@@ -52,67 +54,46 @@ currently has a pending invite in the room.
This callback isn't called if the join is performed by a server administrator, or in the
context of a room creation.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
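For illustration, a hedged sketch of this callback using the newer return values; the room ID and the policy are invented for the example:

```python
from synapse.module_api import NOT_SPAM
from synapse.module_api.errors import Codes

GUARDED_ROOM = "!guarded:example.com"  # invented for the example

async def user_may_join_room(user: str, room: str, is_invited: bool):
    # Example policy: only users holding an invite may join the guarded room.
    if room == GUARDED_ROOM and not is_invited:
        return Codes.FORBIDDEN
    return NOT_SPAM  # allow, deferring to any later callbacks
```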
### `user_may_invite`
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
```
Called when processing an invitation. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
Called when processing an invitation. The module must return a `bool` indicating whether
the inviter can invite the invitee to the given room. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`). Return `False` to prevent
the invitation; otherwise return `True` to permit it.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_send_3pid_invite`
_First introduced in Synapse v1.45.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_send_3pid_invite(
    inviter: str,
    medium: str,
    address: str,
    room_id: str,
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
) -> bool
```
Called when processing an invitation using a third-party identifier (also called a 3PID,
e.g. an email address or a phone number).
e.g. an email address or a phone number). The module must return a `bool` indicating
whether the inviter can invite the invitee to the given room. Return `False` to prevent
the invitation; otherwise return `True` to permit it.
The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
invitee is represented by its medium (e.g. "email") and its address
@@ -134,108 +115,63 @@ await user_may_send_3pid_invite(
**Note**: If the third-party identifier is already associated with a matrix user ID,
[`user_may_invite`](#user_may_invite) will be used instead.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_create_room`
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_create_room(user: str) -> bool
```
Called when processing a room creation request.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
Called when processing a room creation request. The module must return a `bool` indicating
whether the given user (represented by their Matrix user ID) is allowed to create a room.
Return `False` to prevent room creation; otherwise return `True` to permit it.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_create_room_alias`
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_create_room_alias(user_id: str, room_alias: "synapse.module_api.RoomAlias") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
```
Called when trying to associate an alias with an existing room.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
Called when trying to associate an alias with an existing room. The module must return a
`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
to set the given alias. Return `False` to prevent the alias creation; otherwise return
`True` to permit it.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `user_may_publish_room`
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def user_may_publish_room(user_id: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_publish_room(user: str, room_id: str) -> bool
```
Called when trying to publish a room to the homeserver's public rooms directory.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
Called when trying to publish a room to the homeserver's public rooms directory. The
module must return a `bool` indicating whether the given user (represented by their
Matrix user ID) is allowed to publish the given room. Return `False` to prevent the
room from being published; otherwise return `True` to permit its publication.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `True`, Synapse falls through to the next one. The value of the first
callback that does not return `True` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_username_for_spam`
@@ -303,32 +239,21 @@ this callback.
_First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
```python
async def check_media_file_for_spam(
    file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
    file_info: "synapse.rest.media.v1._base.FileInfo",
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
) -> bool
```
Called when storing a local or remote file.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
Called when storing a local or remote file. The module must return a `bool` indicating
whether the given file should be excluded from the homeserver's media store. Return
`True` to prevent this file from being stored; otherwise return `False`.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
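As a sketch, a callback in the newer style might look as follows; treating `file_info.server_name` as the origin server of remote media (and `None` for local uploads) is an assumption made for the example:

```python
from synapse.module_api import NOT_SPAM
from synapse.module_api.errors import Codes

BLOCKED_ORIGINS = {"evil.example.com"}  # invented for the example

async def check_media_file_for_spam(file_wrapper, file_info):
    # Example policy: reject media fetched from blocked origin servers.
    if getattr(file_info, "server_name", None) in BLOCKED_ORIGINS:
        return Codes.FORBIDDEN
    return NOT_SPAM
```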
### `should_drop_federated_event`
@@ -391,9 +316,6 @@ class ListSpamChecker:
            resource=IsUserEvilResource(config),
        )

    async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[Literal["NOT_SPAM"], Codes]:
        if event.sender in self.evil_users:
            return Codes.FORBIDDEN
        else:
            return synapse.module_api.NOT_SPAM

    async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[bool, str]:
        return event.sender not in self.evil_users
```
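For the callback to take effect, the module has to register it with the module API; a hedged sketch of the wiring implied by the example above (the `__init__` body shown here is an assumption, though `register_spam_checker_callbacks` is the module API's registration helper):

```python
from typing import Literal, Union

from synapse.module_api import NOT_SPAM, ModuleApi
from synapse.module_api.errors import Codes

class ListSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        self.evil_users = config.get("evil_users", [])
        # Register the callback so Synapse actually invokes it.
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam,
        )

    async def check_event_for_spam(
        self, event: "synapse.events.EventBase"
    ) -> Union[Literal["NOT_SPAM"], Codes]:
        if event.sender in self.evil_users:
            return Codes.FORBIDDEN
        return NOT_SPAM
```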

View File

@@ -45,8 +45,8 @@ as follows:
maintainer.
To enable the OpenID integration, you should then add a section to the `oidc_providers`
setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
setting in your configuration file (or uncomment one of the existing examples).
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as
the text below for example configurations for specific providers.
## Sample configs
@@ -174,9 +174,7 @@ oidc_providers:
1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
3. Add a rule with any name to add the `preferred_username` claim.
(See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.)
3. Add a rule to add the `preferred_username` claim.
<details>
<summary>Code sample</summary>
@@ -336,12 +334,11 @@ oidc_providers:
    issuer: "https://accounts.google.com/"
    client_id: "your-client-id" # TO BE FILLED
    client_secret: "your-client-secret" # TO BE FILLED
    scopes: ["openid", "profile", "email"] # email is optional, read below
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.given_name|lower }}"
        display_name_template: "{{ user.name }}"
        email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
@@ -424,7 +421,7 @@ Synapse config:
    user_mapping_provider:
      config:
        display_name_template: "{{ user.name }}"
        email_template: "{{ user.email }}"
        email_template: "{{ '{{ user.email }}' }}"
```
Relevant documents:

View File

@@ -57,9 +57,8 @@ https://www.jaegertracing.io/docs/latest/getting-started.
## Enable OpenTracing in Synapse
OpenTracing is not enabled by default. It must be enabled in the
homeserver config by adding the `opentracing` option to your config file. You can find
documentation about how to do this in the [config manual under the header 'Opentracing'](usage/configuration/config_documentation.md#opentracing).
See below for an example Opentracing configuration:
homeserver config by uncommenting the config options under `opentracing`
as shown in the [sample config](./sample_config.yaml). For example:
```yaml
opentracing:

Some files were not shown because too many files have changed in this diff.