Compare commits
1 commit
release-v1 ... release-v1

| Author | SHA1 | Date |
|---|---|---|
|  | ab0f152504 |  |
@@ -27,10 +27,10 @@ which is under the Unlicense licence.
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Passing tests are first */ -}}
{{- /* Failing tests are first */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
@@ -47,6 +47,7 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}


{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
@@ -67,10 +68,11 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}

{{- /* and failing tests are last */ -}}

{{- /* Then passing tests are last */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
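The template's "\033[0;32m" and "\033[0;31m" strings above are ANSI colour escapes, and ::group:: is a GitHub Actions log-folding command. A minimal Python illustration (not part of the diff; the test name and timings are hypothetical) of the shape of a rendered line:

import sys

# A passing test folds into a green, collapsible group; a failure into a red one.
sys.stdout.write("::group::\033[0;32m✅ TestLogin\033[0;37m (PASS; 1.2s)\n")
sys.stdout.write("::group::\033[0;31m❌ TestSync\033[0;37m (FAIL; 3.4s)\n")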
@@ -1,132 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel, if so rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an ABI3 compatible wheel"""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect a wheel file with only a single tag
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macos and need to use
        # `delocate-listdeps`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
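For reference, a short sketch (not part of the diff) of the renaming step the script above performs, using the same `packaging` APIs it imports; the wheel filename here is a hypothetical example:

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

filename = "matrix_synapse-1.70.0-cp37-cp37m-manylinux_2_17_x86_64.whl"  # hypothetical
_, version, _, tags = parse_wheel_filename(filename)
tag = next(iter(tags))                                 # cp37-cp37m-manylinux_2_17_x86_64
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)  # swap the ABI tag for "abi3"
print(f"matrix_synapse-{version}-{abi3_tag}.whl")
# -> matrix_synapse-1.70.0-cp37-abi3-manylinux_2_17_x86_64.whl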
@@ -1,135 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Calculate the trial jobs to run based on whether we're in a PR or not.

import json
import os


def set_output(key: str, value: str):
    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
        print(f"{key}={value}", file=f)


IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs

trial_sqlite_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "all",
    }
]

if not IS_PR:
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10", "3.11")
    )


trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "11",
        "extras": "all",
    }
]

if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.11",
            "database": "postgres",
            "postgres-version": "15",
            "extras": "all",
        }
    )

trial_no_extra_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "",
    }
]

print("::group::Calculated trial jobs")
print(
    json.dumps(
        trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
    )
)
print("::endgroup::")

test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)


# Next calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs

sytest_tests = [
    {
        "sytest-tag": "focal",
    },
    {
        "sytest-tag": "focal",
        "postgres": "postgres",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
            },
            {
                "sytest-tag": "buster",
                "postgres": "multi-postgres",
                "workers": "workers",
            },
        ]
    )


print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)
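To make the GITHUB_OUTPUT contract concrete, here is an illustrative sketch (not part of the diff) of the two lines the script appends on a PR, where each matrix collapses to the base entries above:

import json

trial_pr_matrix = [
    {"python-version": "3.7", "database": "sqlite", "extras": "all"},
    {"python-version": "3.7", "database": "postgres", "postgres-version": "11", "extras": "all"},
    {"python-version": "3.7", "database": "sqlite", "extras": ""},
]
sytest_pr_matrix = [
    {"sytest-tag": "focal"},
    {"sytest-tag": "focal", "postgres": "postgres"},
    {"sytest-tag": "focal", "postgres": "multi-postgres", "workers": "workers"},
]
# These key=value lines are what the workflow later reads back via fromJson().
print("trial_test_matrix=" + json.dumps(trial_pr_matrix))
print("sytest_test_matrix=" + json.dumps(sytest_pr_matrix))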
@@ -1,21 +0,0 @@
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.

set -o pipefail
set -e

# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages

# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"

# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt
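The final grep works because `go test -json` events all begin with a "Time" field; a rough Python equivalent of that filter (an illustration only, the CI uses the shell pipeline above):

# Keep only the JSON test-event lines, dropping interleaved plain-stdout lines.
with open("complement.log") as log:
    json_lines = [line for line in log if line.startswith('{"Time":')]
print("".join(json_lines), end="")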
31  .ci/scripts/postgres_exec.py  (executable file)

@@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import psycopg2

# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.

# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
    user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
    cur.execute(c)
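Usage sketch, matching the invocations elsewhere in this diff: each command-line argument is executed as one SQL statement, so several statements can be run in a single call.

import subprocess

# Equivalent of the `poetry run .ci/scripts/postgres_exec.py ...` calls below.
subprocess.run(
    [".ci/scripts/postgres_exec.py", "DROP DATABASE synapse", "CREATE DATABASE synapse"],
    check=True,
)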
@@ -21,7 +21,7 @@ endblock

block Install Complement Dependencies
  sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
  go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
  go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template

@@ -32,7 +32,7 @@ else
fi

# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

# Port the SQLite database to postgres so we can check command works against postgres
echo "+++ Port SQLite3 database to postgres"
@@ -5,8 +5,18 @@
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.

# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive

set -ex

apt-get update
apt-get install -y \
  python3 python3-dev python3-pip python3-venv pipx \
  libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

export LANG="C.UTF-8"

# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1

@@ -23,6 +33,12 @@ export VIRTUALENV_NO_DOWNLOAD=1
#     a `cryptography` compiled against OpenSSL 1.1.
#   - Omit systemd: we're not logging to journal here.

# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).

sed -i \
  -e "s/[~>]=/==/g" \
  -e '/^python = "^/!s/\^/==/g' \
@@ -39,7 +55,7 @@ sed -i \
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.

pip install toml wheel
pip install --user toml

REMOVE_DEV_DEPENDENCIES="
import toml
@@ -53,8 +69,8 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"

pip install poetry==1.2.0
poetry lock
pipx install poetry==1.1.14
~/.local/bin/poetry lock

echo "::group::Patched pyproject.toml"
cat pyproject.toml
@@ -62,3 +78,6 @@ echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"

~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
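A small Python rendering (illustrative only; the CI uses sed itself) of what the pin-rewriting above does to a pyproject.toml line:

import re

def pin_old_deps(line: str) -> str:
    line = re.sub(r"[~>]=", "==", line)      # -e "s/[~>]=/==/g"
    if not line.startswith('python = "^'):   # -e '/^python = "^/!s/\^/==/g'
        line = line.replace("^", "==")
    return line

print(pin_old_deps('attrs = ">=19.2.0"'))  # -> attrs = "==19.2.0"
print(pin_old_deps('python = "^3.7"'))     # left untouched, see #12343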
@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
#   - configures synapse and a postgres server.
#   - runs the port script on a prepopulated test sqlite db. Checks that the
#     return code is zero.
#   - reruns the port script on the same sqlite db, targeting the same postgres db.
#     Checks that the return code is zero.
#   - runs the port script against a new sqlite db. Checks the return code is zero.
#   - runs the port script on a prepopulated test sqlite db
#   - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.

set -xe -o pipefail
set -xe
cd "$(dirname "$0")/../.."

echo "--- Generate the signing key"

# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,23 +45,9 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# re-create the PostgreSQL database.
psql \
  -c "DROP DATABASE synapse" \
  -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py \
  "DROP DATABASE synapse" \
  "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates

echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff
@@ -4,15 +4,8 @@
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py

rust/target
synapse/*.so

**/__pycache__
@@ -4,7 +4,7 @@
root = true

# 4 space indentation
[*.{py,pyi}]
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88
9  .flake8

@@ -8,11 +8,4 @@
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
#
# flake8-bugbear runs extra checks. Its error codes are described at
#   https://github.com/PyCQA/flake8-bugbear#list-of-warnings
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
# B023: Functions defined inside a loop must not use variables redefined in the loop
# B024: Abstract base class with no abstract method.

ignore=W503,W504,E203,E731,E501,B019,B023,B024
ignore=W503,W504,E203,E731,E501
45  .github/ISSUE_TEMPLATE/BUG_REPORT.yml  (vendored)

@@ -74,36 +74,6 @@ body:
        - Debian packages from packages.matrix.org
        - pip (from PyPI)
        - Other (please mention below)
        - I don't know
    validations:
      required: true
  - type: input
    id: database
    attributes:
      label: Database
      description: |
        Are you using SQLite or PostgreSQL? What's the version of your database?

        If PostgreSQL, please also answer the following:
         - are you using a single PostgreSQL server
           or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
         - have you previously ported from SQLite using the Synapse "portdb" script?
         - have you previously restored from a backup?
    validations:
      required: true
  - type: dropdown
    id: workers
    attributes:
      label: Workers
      description: |
        Are you running a single Synapse process, or are you running
        [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
      options:
        - Single process
        - Multiple workers
        - I don't know
    validations:
      required: true
  - type: textarea
    id: platform
    attributes:
@@ -113,28 +83,17 @@ body:
        e.g. distro, hardware, if it's running in a vm/container, etc.
    validations:
      required: true
  - type: textarea
    id: config
    attributes:
      label: Configuration
      description: |
        Do you have any unusual config options turned on? If so, please provide details.

        - Experimental or undocumented features
        - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
        - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
        - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
        This will be automatically formatted into code, so there is no need for backticks (`\``).
        This will be automatically formatted into code, so there is no need for backticks.

        Please be careful to remove any personal or private data.

        **Bug reports are usually impossible to diagnose without logging.**
        **Bug reports are usually very difficult to diagnose without logging.**
      render: shell
    validations:
      required: true
23  .github/dependabot.yml  (vendored)

@@ -1,23 +0,0 @@
version: 2
updates:
  - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
    package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/docker"
    schedule:
      interval: "weekly"

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "cargo"
    directory: "/"
    versioning-strategy: "lockfile-only"
    schedule:
      interval: "weekly"
46  .github/workflows/dependabot_changelog.yml  (vendored)

@@ -1,46 +0,0 @@
name: Write changelog for dependabot PR
on:
  pull_request:
    types:
      - opened
      - reopened  # For debugging!

permissions:
  # Needed to be able to push the commit. See
  #     https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
  # for a similar example
  contents: write

jobs:
  add-changelog:
    runs-on: 'ubuntu-latest'
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.ref }}
      - name: Write, commit and push changelog
        run: |
          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
          git add changelog.d
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "GitHub Actions"
          git commit -m "Changelog"
          git push
        shell: bash
      # The `git push` above does not trigger CI on the dependabot PR.
      #
      # By default, workflows can't trigger other workflows when they're just using the
      # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
      # recursive workflow loops by accident, because that'll get very expensive very
      # quickly.) Instead, you have to manually call out to another workflow, or else
      # make your changes (i.e. the `git push` above) using a personal access token.
      # See
      # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
      #
      # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
      # See git commit history for previous attempts. If anyone desperately wants to try
      # again in the future, make a matrix-bot account and use its access token to git push.

# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.
15  .github/workflows/docker.yml  (vendored)

@@ -17,19 +17,19 @@ jobs:
    steps:
      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v2
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v2
        uses: docker/setup-buildx-action@v1

      - name: Inspect builder
        run: docker buildx inspect

      - name: Log in to DockerHub
        uses: docker/login-action@v2
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,15 +48,10 @@ jobs:
            type=pep440,pattern={{raw}}

      - name: Build and push all platforms
        uses: docker/build-push-action@v3
        uses: docker/build-push-action@v2
        with:
          push: true
          labels: "gitsha1=${{ github.sha }}"
          tags: "${{ steps.set-tag.outputs.tags }}"
          file: "docker/Dockerfile"
          platforms: linux/amd64,linux/arm64

          # arm64 builds OOM without the git fetch setting. c.f.
          # https://github.com/rust-lang/cargo/issues/10583
          build-args: |
            CARGO_NET_GIT_FETCH_WITH_CLI=true
34  .github/workflows/docs-pr-netlify.yaml  (vendored)

@@ -1,34 +0,0 @@
name: Deploy documentation PR preview

on:
  workflow_run:
    workflows: [ "Prepare documentation PR preview" ]
    types:
      - completed

jobs:
  netlify:
    if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
    runs-on: ubuntu-latest
    steps:
      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
      - name: 📥 Download artifact
        uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
          name: book
          path: book

      - name: 📤 Deploy to Netlify
        uses: matrix-org/netlify-pr-preview@v1
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
          branch: ${{ github.event.workflow_run.head_branch }}
          revision: ${{ github.event.workflow_run.head_sha }}
          token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          site_id: ${{ secrets.NETLIFY_SITE_ID }}
          desc: Documentation preview
          deployment_env: PR Documentation Preview
34  .github/workflows/docs-pr.yaml  (vendored)

@@ -1,34 +0,0 @@
name: Prepare documentation PR preview

on:
  pull_request:
    paths:
      - docs/**

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      - name: Upload Artifact
        uses: actions/upload-artifact@v3
        with:
          name: book
          path: book
          # We'll only use this in a workflow_run, then we're done with it
          retention-days: 1
8  .github/workflows/docs.yaml  (vendored)

@@ -17,10 +17,10 @@ jobs:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
        with:
          mdbook-version: '0.4.17'

@@ -54,11 +54,11 @@ jobs:
          esac

          # finally, set the 'branch-version' var.
          echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
          echo "::set-output name=branch-version::$branch"

      # Deploy to the target directory.
      - name: Deploy to gh pages
        uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
85  .github/workflows/latest_deps.yml  (vendored)

@@ -5,7 +5,7 @@
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
@@ -25,19 +25,13 @@ jobs:
  mypy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2

      - uses: actions/checkout@v2
      # The dev dependencies aren't exposed in the wheel metadata (at least with current
      # poetry-core versions), so we install with poetry.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.2.0"
          poetry-version: "1.2.0b1"
          extras: "all"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
@@ -58,14 +52,7 @@ jobs:
        postgres-version: "14"

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2

      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
@@ -74,7 +61,7 @@ jobs:
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v4
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
      - run: pip install .[all,test]
@@ -82,12 +69,6 @@ jobs:
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done

      # We nuke the local copy, as we've installed synapse into the virtualenv
      # (rather than use an editable install, which we no longer support). If we
      # don't do this then python can't find the native lib.
      - run: rm -rf synapse/

      - run: python -m twisted.trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -131,14 +112,7 @@ jobs:
      BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: stable
      - uses: Swatinem/rust-cache@v2

      - uses: actions/checkout@v2
      - name: Ensure sytest runs `pip install`
        # Delete the lockfile so sytest will `pip install` rather than `poetry install`
        run: rm /src/poetry.lock
@@ -152,7 +126,7 @@ jobs:
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -161,56 +135,25 @@ jobs:
            /logs/**/*.log*


  complement:
    if: "${{ !failure() && !cancelled() }}"
    runs-on: ubuntu-latest
    # TODO: run complement (as with twisted trunk, see #12473).

    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
            database: Postgres

          - arrangement: workers
            database: Postgres

    steps:
      - name: Run actions/checkout@v3 for synapse
        uses: actions/checkout@v3
        with:
          path: synapse

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      - run: |
          set -o pipefail
          TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
        shell: bash
        name: Run Complement Tests

  # Open an issue if the build fails, so we know about it.
  # Only do this if we're not experimenting with this action in a PR.
  # open an issue if the build fails, so we know about it.
  open-issue:
    if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
    if: failure()
    needs:
      # TODO: should mypy be included here? It feels more brittle than the others.
      # TODO: should mypy be included here? It feels more brittle than the other two.
      - mypy
      - trial
      - sytest
      - complement

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
      - uses: actions/checkout@v2
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          update_existing: true
          filename: .ci/latest_deps_build_failed_issue_template.md
74  .github/workflows/push_complement_image.yml  (vendored)

@@ -1,74 +0,0 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead

name: Store complement-synapse image in ghcr.io
on:
  push:
    branches: [ "master" ]
  schedule:
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      branch:
        required: true
        default: 'develop'
        type: choice
        options:
          - develop
          - master

# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build and push complement image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout specific branch (debug build)
        uses: actions/checkout@v3
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
        uses: actions/checkout@v3
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
        uses: actions/checkout@v3
        if: github.event_name == 'push'
        with:
          ref: master
      - name: Login to registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Work out labels for complement image
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}/complement-synapse
          tags: |
            type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
            type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
            type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
            type=sha,format=long
      - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
        run: scripts-dev/complement.sh --build-only
      - name: Tag and push generated image
        run: |
          for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
            echo "tag and push $TAG"
            docker tag complement-synapse $TAG
            docker push $TAG
          done
112  .github/workflows/release-artifacts.yml  (vendored)

@@ -11,12 +11,11 @@ on:

    # we do the full build on tags.
    tags: ["v*"]
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: write

@@ -25,10 +24,8 @@ jobs:
    name: "Calculate list of debian distros"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - id: set-distros
        run: |
          # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -36,7 +33,7 @@ jobs:
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
          fi
          echo "distros=$dists" >> "$GITHUB_OUTPUT"
          echo "::set-output name=distros::$dists"
    # map the step outputs to job outputs
    outputs:
      distros: ${{ steps.set-distros.outputs.distros }}
@@ -52,18 +49,18 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          path: src

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v2
        uses: docker/setup-buildx-action@v1
        with:
          install: true

      - name: Set up docker layer caching
        uses: actions/cache@v3
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -71,9 +68,7 @@ jobs:
            ${{ runner.os }}-buildx-

      - name: Set up python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'
        uses: actions/setup-python@v2

      - name: Build the packages
        # see https://github.com/docker/build-push-action/issues/252
@@ -89,96 +84,14 @@ jobs:
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

      - name: Upload debs as artifacts
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v2
        with:
          name: debs
          path: debs/*

  build-wheels:
    name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-11]
        arch: [x86_64, aarch64]
        # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
        # It is not read by the rest of the workflow.
        is_pr:
          - ${{ startsWith(github.ref, 'refs/pull/') }}

        exclude:
          # Don't build macos wheels on PR CI.
          - is_pr: true
            os: "macos-11"
          # Don't build aarch64 wheels on mac.
          - os: "macos-11"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
            arch: aarch64

    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-python@v4
        with:
          # setup-python@v4 doesn't impose a default python version. Need to use 3.x
          # here, because `python` on osx points to Python 2.7.
          python-version: "3.x"

      - name: Install cibuildwheel
        run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0

      - name: Set up QEMU to emulate aarch64
        if: matrix.arch == 'aarch64'
        uses: docker/setup-qemu-action@v2
        with:
          platforms: arm64

      - name: Build aarch64 wheels
        if: matrix.arch == 'aarch64'
        run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV

      - name: Only build a single wheel on PR
        if: startsWith(github.ref, 'refs/pull/')
        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

      - name: Build wheels
        run: python -m cibuildwheel --output-dir wheelhouse
        env:
          # Skip testing for platforms which various libraries don't have wheels
          # for, and so need extra build deps.
          CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
          # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
          CARGO_NET_GIT_FETCH_WITH_CLI: true
          CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI

      - uses: actions/upload-artifact@v3
        with:
          name: Wheel
          path: ./wheelhouse/*.whl

  build-sdist:
    name: Build sdist
    runs-on: ubuntu-latest
    if: ${{ !startsWith(github.ref, 'refs/pull/') }}

    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - run: pip install build

      - name: Build sdist
        run: python -m build --sdist

      - uses: actions/upload-artifact@v3
        with:
          name: Sdist
          path: dist/*.tar.gz

    name: "Build pypi distribution files"
    uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"

  # if it's a tag, create a release and attach the artifacts to it
  attach-assets:
@@ -186,12 +99,11 @@ jobs:
    if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
    needs:
      - build-debs
      - build-wheels
      - build-sdist
    runs-on: ubuntu-latest
    steps:
      - name: Download all workflow run artifacts
        uses: actions/download-artifact@v3
        uses: actions/download-artifact@v2
      - name: Build a tarball for the debs
        run: tar -cvJf debs.tar.xz debs
      - name: Attach to release
419  .github/workflows/tests.yml  (vendored)

@@ -4,51 +4,26 @@ on:
  push:
    branches: ["develop", "release-*"]
  pull_request:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  # Job to detect what has changed so we don't run e.g. Rust checks on PRs that
  # don't modify Rust code.
  changes:
    runs-on: ubuntu-latest
    outputs:
      rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
    steps:
      - uses: dorny/paths-filter@v2
        id: filter
        # We only check on PRs
        if: startsWith(github.ref, 'refs/pull/')
        with:
          filters: |
            rust:
              - 'rust/**'
              - 'Cargo.toml'
              - 'Cargo.lock'

  check-sampleconfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: matrix-org/setup-python-poetry@v1
        with:
          extras: "all"
      - run: poetry run scripts-dev/generate_sample_config.sh --check
      - run: poetry run scripts-dev/config-lint.sh
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install .
      - run: scripts-dev/generate_sample_config.sh --check
      - run: scripts-dev/config-lint.sh

  check-schema-delta:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors

@@ -60,174 +35,80 @@ jobs:
  lint-crlf:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2
      - name: Check line endings
        run: scripts-dev/check_line_terminators.sh

  lint-newsfile:
    if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: actions/setup-python@v2
      - run: "pip install 'towncrier>=18.6.0rc1'"
      - run: scripts-dev/check-newsfragment.sh
        env:
          PULL_REQUEST_NUMBER: ${{ github.event.number }}

  lint-pydantic:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: matrix-org/setup-python-poetry@v1
        with:
          extras: "all"
      - run: poetry run scripts-dev/check_pydantic_models.py

  lint-clippy:
    runs-on: ubuntu-latest
    needs: changes
    if: ${{ needs.changes.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
          components: clippy
      - uses: Swatinem/rust-cache@v2

      - run: cargo clippy -- -D warnings

  # We also lint against a nightly rustc so that we can lint the benchmark
  # suite, which requires a nightly compiler.
  lint-clippy-nightly:
    runs-on: ubuntu-latest
    needs: changes
    if: ${{ needs.changes.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: nightly-2022-12-01
          components: clippy
      - uses: Swatinem/rust-cache@v2

      - run: cargo clippy --all-features -- -D warnings

  lint-rustfmt:
    runs-on: ubuntu-latest
    needs: changes
    if: ${{ needs.changes.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
          components: rustfmt
      - uses: Swatinem/rust-cache@v2

      - run: cargo fmt --check

  # Dummy step to gate other tests on without repeating the whole list
  linting-done:
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    needs:
      - lint
      - lint-crlf
      - lint-newsfile
      - lint-pydantic
      - check-sampleconfig
      - check-schema-delta
      - lint-clippy
      - lint-rustfmt
    needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
    runs-on: ubuntu-latest
    steps:
      - run: "true"

  calculate-test-jobs:
  trial:
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - id: get-matrix
        run: .ci/scripts/calculate_jobs.py
    outputs:
      trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
      sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}

  trial:
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: calculate-test-jobs
    runs-on: ubuntu-latest
    strategy:
      matrix:
        job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
        python-version: ["3.7", "3.8", "3.9", "3.10"]
        database: ["sqlite"]
        extras: ["all"]
        include:
          # Newest Python without optional deps
          - python-version: "3.10"
            extras: ""

          # Oldest Python with PostgreSQL
          - python-version: "3.7"
            database: "postgres"
            postgres-version: "10"
            extras: "all"

          # Newest Python with newest PostgreSQL
          - python-version: "3.10"
            database: "postgres"
            postgres-version: "14"
            extras: "all"

    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
        if: ${{ matrix.job.postgres-version }}
        # 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
        # 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            --tmpfs /var/lib/postgres:rw,size=6144m \
            --mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.job.postgres-version }}

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2

            postgres:${{ matrix.postgres-version }}
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.job.python-version }}
          extras: ${{ matrix.job.extras }}
          python-version: ${{ matrix.python-version }}
          extras: ${{ matrix.extras }}
      - name: Await PostgreSQL
        if: ${{ matrix.job.postgres-version }}
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: poetry run trial --jobs=6 tests
      - run: poetry run trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: /var/run/postgresql
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
@@ -247,56 +128,16 @@ jobs:
    # Note: sqlite only; no postgres
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-20.04
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
      - uses: actions/checkout@v2
      - name: Test with old deps
        uses: docker://ubuntu:focal # For old python and sqlite
        # Note: focal seems to be using 3.8, but the oldest is 3.7?
        # See https://github.com/matrix-org/synapse/issues/12343
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2

      # There aren't wheels for some of the older deps, so we need to install
      # their build dependencies
      - run: |
          sudo apt-get -qq install build-essential libffi-dev python-dev \
            libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

      - uses: actions/setup-python@v4
        with:
          python-version: '3.7'

      # Calculating the old-deps actually takes a bunch of time, so we cache the
      # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
      # otherwise the `poetry install` step will error due to the poetry.lock
      # file being outdated.
      #
      # This caches the output of `Prepare old deps`, which should generate the
      # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
      - uses: actions/cache@v3
        id: cache-poetry-old-deps
        name: Cache poetry.lock
        with:
          path: |
            poetry.lock
            pyproject.toml
          key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
      - name: Prepare old deps
        if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
        run: .ci/scripts/prepare_old_deps.sh

      # We only now install poetry so that `setup-python-poetry` caches the
      # right poetry.lock's dependencies.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: '3.7'
          extras: "all test"

      - run: poetry run trial -j6 tests
          workdir: /github/workspace
          entrypoint: .ci/scripts/test_old_deps.sh
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
@@ -322,7 +163,7 @@ jobs:
        extras: ["all"]

    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2
      # Install libs necessary for PyPy to build binary wheels for dependencies
      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
      - uses: matrix-org/setup-python-poetry@v1
@@ -345,39 +186,50 @@ jobs:

  sytest:
    if: ${{ !failure() && !cancelled() }}
    needs: calculate-test-jobs
    needs: linting-done
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
      volumes:
        - ${{ github.workspace }}:/src
      env:
        SYTEST_BRANCH: ${{ github.head_ref }}
        POSTGRES: ${{ matrix.job.postgres && 1}}
        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
        WORKERS: ${{ matrix.job.workers && 1 }}
        BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
        POSTGRES: ${{ matrix.postgres && 1}}
        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
        WORKERS: ${{ matrix.workers && 1 }}
        REDIS: ${{ matrix.redis && 1 }}
        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
        TOP: ${{ github.workspace }}

    strategy:
      fail-fast: false
      matrix:
        job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
        include:
          - sytest-tag: focal

          - sytest-tag: focal
            postgres: postgres

          - sytest-tag: testing
            postgres: postgres

          - sytest-tag: focal
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: postgres
            workers: workers
            redis: redis

    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v2
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

      - name: Install Rust
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
        with:
          toolchain: 1.58.1
      - uses: Swatinem/rust-cache@v2

      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
@@ -385,10 +237,10 @@ jobs:
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v3
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*
@@ -415,31 +267,28 @@
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: sudo apt-get -qq install xmlsec1 postgresql-client
|
||||
- uses: actions/checkout@v2
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
extras: "postgres"
|
||||
- run: .ci/scripts/test_export_data_command.sh
|
||||
env:
|
||||
PGHOST: localhost
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: postgres
|
||||
|
||||
|
||||
portdb:
|
||||
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
|
||||
needs: linting-done
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
TOP: ${{ github.workspace }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- python-version: "3.7"
|
||||
postgres-version: "11"
|
||||
postgres-version: "10"
|
||||
|
||||
- python-version: "3.11"
|
||||
postgres-version: "15"
|
||||
- python-version: "3.10"
|
||||
postgres-version: "14"
|
||||
|
||||
services:
|
||||
postgres:
|
||||
@@ -456,37 +305,13 @@ jobs:
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Add PostgreSQL apt repository
|
||||
# We need a version of pg_dump that can handle the version of
|
||||
# PostgreSQL being tested against. The Ubuntu package repository lags
|
||||
# behind new releases, so we have to use the PostreSQL apt repository.
|
||||
# Steps taken from https://www.postgresql.org/download/linux/ubuntu/
|
||||
run: |
|
||||
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
|
||||
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
|
||||
sudo apt-get update
|
||||
- run: sudo apt-get -qq install xmlsec1 postgresql-client
|
||||
- uses: actions/checkout@v2
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
- uses: matrix-org/setup-python-poetry@v1
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
extras: "postgres"
|
||||
- run: .ci/scripts/test_synapse_port_db.sh
|
||||
id: run_tester_script
|
||||
env:
|
||||
PGHOST: localhost
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: postgres
|
||||
- name: "Upload schema differences"
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
|
||||
with:
|
||||
name: Schema dumps
|
||||
path: |
|
||||
unported.sql
|
||||
ported.sql
|
||||
schema_diff
|
||||
|
||||
complement:
|
||||
if: "${{ !failure() && !cancelled() }}"
|
||||
@@ -503,65 +328,59 @@ jobs:
|
||||
- arrangement: monolith
|
||||
database: Postgres
|
||||
|
||||
- arrangement: workers
|
||||
database: Postgres
|
||||
|
||||
steps:
|
||||
- name: Run actions/checkout@v3 for synapse
|
||||
uses: actions/checkout@v3
|
||||
- name: Run actions/checkout@v2 for synapse
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: synapse
|
||||
|
||||
- name: Install Rust
|
||||
# There don't seem to be versioned releases of this action per se: for each rust
|
||||
# version there is a branch which gets constantly rebased on top of master.
|
||||
# We pin to a specific commit for paranoia's sake.
|
||||
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
|
||||
with:
|
||||
toolchain: 1.58.1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
- run: |
|
||||
set -o pipefail
|
||||
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
|
||||
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
|
||||
shell: bash
|
||||
name: Run Complement Tests
|
||||
|
||||
cargo-test:
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
# XXX When complement with workers is stable, move this back into the standard
|
||||
# "complement" matrix above.
|
||||
#
|
||||
# See https://github.com/matrix-org/synapse/issues/13161
|
||||
complement-workers:
|
||||
if: "${{ !failure() && !cancelled() }}"
|
||||
needs: linting-done
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- linting-done
|
||||
- changes
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install Rust
|
||||
# There don't seem to be versioned releases of this action per se: for each rust
|
||||
# version there is a branch which gets constantly rebased on top of master.
|
||||
# We pin to a specific commit for paranoia's sake.
|
||||
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
|
||||
- name: Run actions/checkout@v2 for synapse
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
toolchain: 1.58.1
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
path: synapse
|
||||
|
||||
- run: cargo test
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
- run: |
|
||||
set -o pipefail
|
||||
POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
|
||||
shell: bash
|
||||
name: Run Complement Tests
|
||||
|
||||
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
|
||||
tests-done:
|
||||
if: ${{ always() }}
|
||||
needs:
|
||||
- check-sampleconfig
|
||||
- lint
|
||||
- lint-crlf
|
||||
- lint-newsfile
|
||||
- trial
|
||||
- trial-olddeps
|
||||
- sytest
|
||||
- export-data
|
||||
- portdb
|
||||
- complement
|
||||
- cargo-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: matrix-org/done-action@v2
|
||||
@@ -569,7 +388,5 @@ jobs:
|
||||
needs: ${{ toJSON(needs) }}
|
||||
|
||||
# The newsfile lint may be skipped on non PR builds
|
||||
# Cargo test is skipped if there is no changes on Rust code
|
||||
skippable: |
|
||||
skippable:
|
||||
lint-newsfile
|
||||
cargo-test
|
||||
|
||||
.github/workflows/triage-incoming.yml (vendored): 15 changed lines
@@ -1,15 +0,0 @@
name: Move new issues into the issue triage board

on:
issues:
types: [ opened ]

jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}
secrets:
github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
.github/workflows/triage_labelled.yml (vendored): 44 changed lines
@@ -1,44 +0,0 @@
name: Move labelled issues to correct projects

on:
issues:
types: [ labeled ]

jobs:
move_needs_info:
name: Move X-Needs-Info on the triage board
runs-on: ubuntu-latest
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: actions/add-to-project@main
id: add_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"
github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
- name: Set status
env:
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
run: |
gh api graphql -f query='
mutation(
$project: ID!
$item: ID!
$fieldid: ID!
$columnid: String!
) {
updateProjectV2ItemFieldValue(
input: {
projectId: $project
itemId: $item
fieldId: $fieldid
value: {
singleSelectOptionId: $columnid
}
}
) {
projectV2Item {
id
}
}
}' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
.github/workflows/twisted_trunk.yml (vendored): 42 changed lines
@@ -15,14 +15,7 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2

- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"

@@ -39,15 +32,8 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1

- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2

- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"

@@ -79,14 +65,7 @@ jobs:
- ${{ github.workspace }}:/src

steps:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2

- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.

@@ -109,7 +88,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})

@@ -135,8 +114,8 @@ jobs:
database: Postgres

steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse

@@ -148,16 +127,17 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.2.0
pipx install poetry==1.1.14

poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.14 poetry lock --check
working-directory: synapse

- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests

@@ -173,8 +153,8 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
.gitignore (vendored): 10 changed lines
@@ -15,9 +15,8 @@ _trial_temp*/
.DS_Store
__pycache__/

# We do want the poetry and cargo lockfile.
# We do want the poetry lockfile.
!poetry.lock
!Cargo.lock

# stuff that is likely to exist when you run a server locally
/*.db

@@ -61,10 +60,3 @@ book/
# complement
/complement-*
/master.tar.gz

# rust
/target/
/synapse/*.so

# Poetry will create a setup.py, which we don't want to include.
/setup.py

@@ -1 +0,0 @@
group_imports = "StdExternalCrate"
CHANGES.md: 1111 changed lines (diff suppressed because it is too large)
Cargo.lock (generated): 466 changed lines
@@ -1,466 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
 "memchr",
]

[[package]]
name = "anyhow"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"

[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"

[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "blake2"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
 "digest",
]

[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
 "generic-array",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
 "generic-array",
 "typenum",
]

[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
 "block-buffer",
 "crypto-common",
 "subtle",
]

[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
 "typenum",
 "version_check",
]

[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"

[[package]]
name = "itoa"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"

[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
 "cfg-if",
]

[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"

[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
 "autocfg",
]

[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"

[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
 "cfg-if",
 "libc",
 "redox_syscall",
 "smallvec",
 "windows-sys",
]

[[package]]
name = "proc-macro2"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "pyo3"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
 "anyhow",
 "cfg-if",
 "indoc",
 "libc",
 "memoffset",
 "parking_lot",
 "pyo3-build-config",
 "pyo3-ffi",
 "pyo3-macros",
 "unindent",
]

[[package]]
name = "pyo3-build-config"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
 "once_cell",
 "target-lexicon",
]

[[package]]
name = "pyo3-ffi"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
 "libc",
 "pyo3-build-config",
]

[[package]]
name = "pyo3-log"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
 "arc-swap",
 "log",
 "pyo3",
]

[[package]]
name = "pyo3-macros"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
 "proc-macro2",
 "pyo3-macros-backend",
 "quote",
 "syn",
]

[[package]]
name = "pyo3-macros-backend"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
 "pyo3",
 "serde",
]

[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
 "bitflags",
]

[[package]]
name = "regex"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
 "aho-corasick",
 "memchr",
 "regex-syntax",
]

[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"

[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"

[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"

[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"

[[package]]
name = "syn"
version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
 "anyhow",
 "blake2",
 "hex",
 "lazy_static",
 "log",
 "pyo3",
 "pyo3-log",
 "pythonize",
 "regex",
 "serde",
 "serde_json",
]

[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"

[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"

[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"

[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"

[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"

[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
 "windows_aarch64_msvc",
 "windows_i686_gnu",
 "windows_i686_msvc",
 "windows_x86_64_gnu",
 "windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"

[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"

[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"

[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"

[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).

[workspace]
members = ["rust"]
README.rst: 476 changed lines
@@ -2,70 +2,152 @@
Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================

Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
maintained by the Matrix.org Foundation. We began rapid development in 2014,
reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
in earnest today.

Briefly, Matrix is an open standard for communications on the internet, supporting
federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
<https://spec.matrix.org/>`_ describes the technical details.

.. contents::

Installing and configuration
============================
Introduction
============

The Synapse documentation describes `how to install Synapse <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
<https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.
Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:

- Everything in Matrix happens in a room. Rooms are distributed and do not
  exist on any single server. Rooms can be located using convenience aliases
  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a third party identifier
  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)

The overall architecture is::

    client <----> homeserver <=====================> homeserver <----> client
           https://somewhere.org/_matrix      https://elsewhere.net/_matrix

``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.libera.chat/matrix.

Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!

About Matrix
============

Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
which handle:

- Creating and managing fully distributed chat rooms with no
  single points of control or failure
- Eventually-consistent cryptographically secure synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
  Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls

These APIs are intended to be implemented on a wide range of servers, services
and clients, letting developers build messaging and VoIP functionality on top
of the entirely open Matrix ecosystem rather than using closed or proprietary
solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.

We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

Thanks for using Matrix!

Support
=======

For support installing or managing Synapse, please join |room|_ (from a matrix.org
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.

Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
with its source available in |docs|_.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs

Synapse Installation
====================

.. _federation:

Synapse has a variety of `config options
<https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
which can be used to customise its behaviour after installation.
There are additional details on how to `configure Synapse for federation here
<https://matrix-org.github.io/synapse/latest/federate.html>`_.

.. _reverse-proxy:

Using a reverse proxy with Synapse
----------------------------------

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
For information on configuring one, see `the reverse proxy docs
<https://matrix-org.github.io/synapse/latest/reverse_proxy.html>`_.

Upgrading an existing Synapse
-----------------------------

The instructions for upgrading Synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of Synapse.

.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
* For details on how to install synapse, see
  `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_


Platform dependencies
---------------------
Connecting to Synapse from a client
===================================

Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`deprecation policy <https://matrix-org.github.io/synapse/latest/deprecation_policy.html>`_
for more details.
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.

Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.

An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.

If all goes well you should at least be able to log in, create a room, and
start sending messages.

.. _`client-user-reg`:

Registering a new user from a client
------------------------------------

By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::

    @localpart:my.domain.name

(pronounced "at localpart on my dot domain dot name").

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.

Security note
-------------
=============

Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.

@@ -105,76 +187,30 @@ Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.


Testing a new installation
==========================
Upgrading an existing Synapse
=============================

The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
The instructions for upgrading synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.

Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html

An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
.. _reverse-proxy:

If all goes well you should at least be able to log in, create a room, and
start sending messages.
Using a reverse proxy with Synapse
==================================

.. _`client-user-reg`:
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.

Registering a new user from a client
------------------------------------

By default, registration of new users via Matrix clients is disabled. To enable
it:

1. In the
   `registration config section <https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_
   set ``enable_registration: true`` in ``homeserver.yaml``.
2. Then **either**:

   a. set up a `CAPTCHA <https://matrix-org.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
   b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.

We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
the public internet. Without it, anyone can freely register accounts on your homeserver.
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
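For reference, a minimal sketch of the relevant ``homeserver.yaml`` settings (the option names for option (a) are the CAPTCHA settings from the linked docs; the key values are placeholders from your own reCAPTCHA registration, not real credentials):

.. code-block:: yaml

    enable_registration: true
    # Option (a): require a CAPTCHA at registration time
    enable_registration_captcha: true
    recaptcha_public_key: "YOUR_SITE_KEY"      # placeholder
    recaptcha_private_key: "YOUR_SECRET_KEY"   # placeholder
    # Option (b) instead, not recommended on the public internet:
    # enable_registration_without_verification: true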

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::

    @localpart:my.domain.name

(pronounced "at localpart on my dot domain dot name").

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.

Troubleshooting and support
===========================

The `Admin FAQ <https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html>`_
includes tips on dealing with some common problems. For more details, see
`Synapse's wider documentation <https://matrix-org.github.io/synapse/latest/>`_.

For additional support installing or managing Synapse, please ask in the community
support room |room|_ (from a matrix.org account if necessary). We do not use GitHub
issues for support requests, only for bug reports and feature requests.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs
For information on configuring one, see `<docs/reverse_proxy.md>`_.

Identity Servers
================
@@ -206,15 +242,34 @@ an email address with your account, or send an invite to another user via their
email address.


Development
===========
Password reset
==============

Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
or by directly editing the database as shown below.

First calculate the hash of the new password::

    $ ~/synapse/env/bin/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
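For the admin API route, a request along these lines resets the password over HTTP (a sketch: the access token must belong to a server admin, and the endpoint shown is the reset-password route from the admin API docs linked above; host, port and values are illustrative)::

    curl -X POST --header "Authorization: Bearer <admin_access_token>" \
        -d '{"new_password": "correct-horse", "logout_devices": true}' \
        "http://localhost:8008/_synapse/admin/v1/reset_password/@test:test.com"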


Synapse Development
===================

We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for synapse developers as well as synapse administrators.

information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:

* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
@@ -225,6 +280,187 @@ Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!


Quick start
-----------

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::

    pip install --user pipx
    pipx install poetry

as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::

    poetry install --extras "all test"

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

    poetry run ./demo/start.sh

(to stop, you can use ``poetry run ./demo/stop.sh``)

See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.

If you just want to start a single instance of the app and run it directly::

    # Create the homeserver.yaml config once
    poetry run synapse_homeserver \
      --server-name my.domain.name \
      --config-path homeserver.yaml \
      --generate-config \
      --report-stats=[yes|no]

    # Start the app
    poetry run synapse_homeserver --config-path homeserver.yaml


Running the unit tests
----------------------

After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::

    poetry run trial tests

This should end with a 'PASSED' result (note that exact numbers will
differ)::

    Ran 1337 tests in 716.064s

    PASSED (skips=15, successes=1322)

For more tips on running the unit tests, like running a specific test or
to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.


Running the Integration Tests
-----------------------------

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.


Platform dependencies
=====================

Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`<docs/deprecation_policy.md>`_ document for more details.


Troubleshooting
===============

Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_

Running out of File Handles
---------------------------

If synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of March 2019 this hasn't happened yet).

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``
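For example (an illustrative ``limits.conf`` entry; the ``matrix-synapse`` user name and the exact limits are assumptions to adapt to your deployment, and systemd services may need ``LimitNOFILE`` instead)::

    #<domain>       <type>  <item>   <value>
    matrix-synapse  soft    nofile   4096
    matrix-synapse  hard    nofile   8192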

Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at #synapse:matrix.org if
you see this failure mode so we can help debug it, however.

Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------

First, ensure you are running the latest version of Synapse, using Python 3
with a PostgreSQL database.

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
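For example, to launch with a larger cache factor (a sketch using ``synctl``, Synapse's bundled launcher; set the variable however your service manager actually starts Synapse)::

    SYNAPSE_CACHE_FACTOR=2.0 synctl start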

Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the ``libjemalloc1`` package and adding this
line to ``/etc/default/matrix-synapse``::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see `discussion
<https://github.com/matrix-org/synapse/issues/3971>`_). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by setting
the following in the Synapse config file:

.. code-block:: yaml

    presence:
      enabled: false

People can't accept room invitations from me
--------------------------------------------

The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs::

    2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>

This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.

.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
    :alt: (get support on #synapse:matrix.org)
    :target: https://matrix.to/#/#synapse:matrix.org
@@ -1,23 +0,0 @@
# A build script for poetry that adds the rust extension.

import os
from typing import Any, Dict

from setuptools_rust import Binding, RustExtension


def build(setup_kwargs: Dict[str, Any]) -> None:
    original_project_dir = os.path.dirname(os.path.realpath(__file__))
    cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")

    extension = RustExtension(
        target="synapse.synapse_rust",
        path=cargo_toml_path,
        binding=Binding.PyO3,
        py_limited_api=True,
        # We force always building in release mode, as we can't tell the
        # difference between using `poetry` in development vs production.
        debug=False,
    )
    setup_kwargs.setdefault("rust_extensions", []).append(extension)
    setup_kwargs["zip_safe"] = False
@@ -94,6 +94,20 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```

### Add Workers to `instance_map`

Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:

```yaml
instance_map:
  synapse-generic-worker-1:        # The worker_name setting in your worker configuration file
    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
    port: 8034                     # The port assigned to the replication listener in your worker config file
  synapse-federation-sender-1:
    host: synapse-federation-sender-1
    port: 8034
```

### Configure Federation Senders

This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
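The hunk below elides the exact values; as a sketch (assuming the single
federation sender named above, and not taken from this diff), the settings
would be of the form:

```yaml
send_federation: false
federation_sender_instances:
  - synapse-federation-sender-1
```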
@@ -108,4 +122,4 @@ federation_sender_instances:

## Other Worker types

Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
@@ -5,4 +5,10 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]

worker_log_config: /data/federation_sender.log.config
@@ -6,6 +6,10 @@ worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]
  - type: http
    port: 8081
    x_forwarded: true
File diff suppressed because it is too large
21
contrib/prometheus/synapse-v1.rules
Normal file
@@ -0,0 +1,21 @@
|
||||
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
|
||||
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
|
||||
|
||||
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
|
||||
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
|
||||
|
||||
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
|
||||
|
||||
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
|
||||
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
|
||||
|
||||
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
|
||||
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
|
||||
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
|
||||
|
||||
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
|
||||
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
|
||||
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
|
||||
|
||||
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
|
||||
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
|
||||
@@ -1,20 +1,37 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  ###
  ### Prometheus Console Only
  ### The following rules are only needed if you use the Prometheus Console
  ### in contrib/prometheus/consoles/synapse.html
  ###
  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus_total + 0'
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
@@ -23,11 +40,11 @@ groups:
  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus_total + 0'
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus_total + 0'
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
@@ -41,34 +58,21 @@ groups:
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
  ###
  ### End of 'Prometheus Console Only' rules block
  ###


  ###
  ### Grafana Only
  ### The following rules are only needed if you use the Grafana dashboard
  ### in contrib/grafana/synapse.json
  ###
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
    labels:
      type: remote
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
    labels:
      type: local
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
    labels:
      type: bridges

  - record: synapse_storage_events_persisted_by_event_type
    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)

    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
  - record: synapse_storage_events_persisted_by_origin
    expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
  ###
  ### End of 'Grafana Only' rules block
  ###
    expr: sum without(type) (synapse_storage_events_persisted_events_sep)
@@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
cat << EOF > generic_worker$i.yaml
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

@@ -15,8 +15,6 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 808$i
9
debian/build_virtualenv
vendored
@@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0
pip install poetry==1.2.0b1
poetry export \
    --extras all \
    --extras test \
@@ -61,7 +61,7 @@ dh_virtualenv \
    --extras="all,systemd,test" \
    --requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

@@ -78,14 +78,9 @@ case "$DEB_BUILD_OPTIONS" in

        cp -r tests "$tmpdir"

        # To avoid pulling in the unbuilt Synapse in the local directory
        pushd /

        PYTHONPATH="$tmpdir" \
            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

        popd

        ;;
esac
201
debian/changelog
vendored
@@ -1,204 +1,3 @@
matrix-synapse-py3 (1.74.0) stable; urgency=medium

  * New Synapse release 1.74.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Dec 2022 16:07:38 +0000

matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium

  * New dependency on libicu-dev to provide improved results for user
    search.
  * New Synapse release 1.74.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Dec 2022 13:30:01 +0000

matrix-synapse-py3 (1.73.0) stable; urgency=medium

  * New Synapse release 1.73.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Dec 2022 11:48:56 +0000

matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium

  * New Synapse release 1.73.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 01 Dec 2022 10:02:19 +0000

matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium

  * New Synapse release 1.73.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Nov 2022 12:28:13 +0000

matrix-synapse-py3 (1.72.0) stable; urgency=medium

  * New Synapse release 1.72.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Nov 2022 10:57:30 +0000

matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium

  * New Synapse release 1.72.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000

matrix-synapse-py3 (1.71.0) stable; urgency=medium

  * New Synapse release 1.71.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000

matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium

  * New Synapse release 1.71.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000

matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium

  * New Synapse release 1.71.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000

matrix-synapse-py3 (1.70.1) stable; urgency=medium

  * New Synapse release 1.70.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100

matrix-synapse-py3 (1.70.0) stable; urgency=medium

  * New Synapse release 1.70.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100

matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium

  * New Synapse release 1.70.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100

matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium

  * New Synapse release 1.70.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 19 Oct 2022 14:11:57 +0100

matrix-synapse-py3 (1.69.0) stable; urgency=medium

  * New Synapse release 1.69.0.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Oct 2022 11:31:03 +0100

matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium

  * New Synapse release 1.69.0rc4.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100

matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium

  * New Synapse release 1.69.0rc3.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Oct 2022 13:24:04 +0100

matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium

  * New Synapse release 1.69.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 06 Oct 2022 14:45:00 +0100

matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium

  * The man page for the hash_password script has been updated to reflect
    the correct default value of 'bcrypt_rounds'.
  * New Synapse release 1.69.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Oct 2022 11:17:16 +0100

matrix-synapse-py3 (1.68.0) stable; urgency=medium

  * New Synapse release 1.68.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 27 Sep 2022 12:02:09 +0100

matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium

  * New Synapse release 1.68.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 23 Sep 2022 09:40:10 +0100

matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium

  * New Synapse release 1.68.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Sep 2022 11:18:20 +0100

matrix-synapse-py3 (1.67.0) stable; urgency=medium

  * New Synapse release 1.67.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Sep 2022 09:19:56 +0100

matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium

  [ Erik Johnston ]
  * Use stable poetry 1.2.0 version, rather than a prerelease.

  [ Synapse Packaging team ]
  * New Synapse release 1.67.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Sep 2022 09:01:06 +0100

matrix-synapse-py3 (1.66.0) stable; urgency=medium

  * New Synapse release 1.66.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 31 Aug 2022 11:20:17 +0100

matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium

  [ Jörg Behrmann ]
  * Update debhelper to compatibility level 12.
  * Drop the preinst script stopping synapse.
  * Allocate a group for the system user.
  * Change dpkg-statoverride to --force-statoverride-add.

  [ Erik Johnston ]
  * Disable `dh_auto_configure` as it broke during Rust build.

 -- Jörg Behrmann <behrmann@physik.fu-berlin.de>  Tue, 23 Aug 2022 17:17:00 +0100

matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium

  * New Synapse release 1.66.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Aug 2022 12:25:19 +0100

matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium

  * New Synapse release 1.66.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Aug 2022 09:48:55 +0100

matrix-synapse-py3 (1.65.0) stable; urgency=medium

  * New Synapse release 1.65.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Aug 2022 16:51:26 +0100

matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium

  * New Synapse release 1.65.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 11 Aug 2022 11:38:18 +0100

matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium

  * New Synapse release 1.65.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Aug 2022 11:39:29 +0100

matrix-synapse-py3 (1.64.0) stable; urgency=medium

  * New Synapse release 1.64.0.
1
debian/compat
vendored
Normal file
@@ -0,0 +1 @@
10
4
debian/control
vendored
@@ -4,12 +4,10 @@ Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
 debhelper-compat (= 12),
 debhelper (>= 10),
 dh-virtualenv (>= 1.1),
 libsystemd-dev,
 libpq-dev,
 libicu-dev,
 pkg-config,
 lsb-release,
 python3-dev,
 python3,
2
debian/hash_password.1
vendored
@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
.P
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"
2
debian/hash_password.ronn
vendored
@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.

It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **12**.
value used for the hashing. By default `bcrypt_rounds` is set to **10**.

The hashed password is written on the `STDOUT`.
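For reference, a YAML file of the shape the man page describes might look
like this (a sketch; the values are illustrative and the pepper is a
placeholder):

```yaml
bcrypt_rounds: 10
password_config:
  pepper: "EXAMPLE_PEPPER"
```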
4
debian/matrix-synapse-py3.postinst
vendored
@@ -40,12 +40,12 @@ EOF
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update

if ! getent passwd $USER >/dev/null; then
    adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER
    adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
fi

for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
    if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
        dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR
        dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
    fi
done
31
debian/matrix-synapse-py3.preinst
vendored
Normal file
@@ -0,0 +1,31 @@
#!/bin/sh -e

# Attempt to undo some of the braindamage caused by
# https://github.com/matrix-org/package-synapse-debian/issues/18.
#
# Due to reasons [1], the old python2 matrix-synapse package will not stop the
# service when the package is uninstalled. Our maintainer scripts will do the
# right thing in terms of ensuring the service is enabled and unmasked, but
# then do a `systemctl start matrix-synapse`, which of course does nothing -
# leaving the old (py2) service running.
#
# There should normally be no reason for the service to be running during our
# preinst, so we assume that if it *is* running, it's due to that situation,
# and stop it.
#
# [1] dh_systemd_start doesn't do anything because it sees that there is an
# init.d script with the same name, so leaves it to dh_installinit.
#
# dh_installinit doesn't do anything because somebody gave it a --no-start
# for unknown reasons.

if [ -x /bin/systemctl ]; then
    if /bin/systemctl --quiet is-active -- matrix-synapse; then
        echo >&2 "stopping existing matrix-synapse service"
        /bin/systemctl stop matrix-synapse || true
    fi
fi

#DEBHELPER#

exit 0
2
debian/matrix-synapse.default
vendored
Normal file
@@ -0,0 +1,2 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)
6
debian/matrix-synapse.service
vendored
@@ -5,6 +5,7 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
@@ -12,10 +13,5 @@ Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse

# The environment file is not shipped by default anymore and the below directive
# is for backwards compatibility only. Please use your homeserver.yaml if
# possible.
EnvironmentFile=-/etc/default/matrix-synapse

[Install]
WantedBy=multi-user.target
14
debian/rules
vendored
@@ -6,19 +6,15 @@
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`

override_dh_installsystemd:
	dh_installsystemd --name=matrix-synapse
override_dh_systemd_enable:
	dh_systemd_enable --name=matrix-synapse

override_dh_installinit:
	dh_installinit --name=matrix-synapse

# we don't really want to strip the symbols from our object files.
override_dh_strip:

override_dh_auto_configure:

# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
override_dh_dwz:

# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that
@@ -31,9 +31,7 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements

# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -42,14 +40,22 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && apt-get install -yqq \
        build-essential git libffi-dev libssl-dev \
    apt-get update -qq && apt-get install -yqq git \
    && rm -rf /var/lib/apt/lists/*

# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.14,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install --user "poetry==1.2.0"
    pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"

WORKDIR /synapse

@@ -62,23 +68,12 @@ COPY pyproject.toml poetry.lock /synapse/
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION

# If specified, we won't use the Poetry lockfile.
# Instead, we'll just install what a regular `pip install` would from PyPI.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
    /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
  else \
    touch /synapse/requirements.txt; \
  fi
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}

###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
FROM docker.io/python:${PYTHON_VERSION}-slim as builder

# install the OS build deps
RUN \
@@ -94,28 +89,11 @@ RUN \
    libxml++2.6-dev \
    libxslt1-dev \
    openssl \
    rustc \
    zlib1g-dev \
    git \
    curl \
    libicu-dev \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*


# Install rust and ensure its in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI

# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -127,29 +105,17 @@ RUN --mount=type=cache,target=/root/.cache/pip \

# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
COPY pyproject.toml README.rst /synapse/

# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
    --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
    if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
        pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
    else \
        pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
    fi
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse

###
### Stage 2: runtime
###

FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
FROM docker.io/python:${PYTHON_VERSION}-slim

LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -167,7 +133,6 @@ RUN \
    libwebp6 \
    xmlsec1 \
    libjemalloc2 \
    libicu67 \
    libssl-dev \
    openssl \
    && rm -rf /var/lib/apt/lists/*
@@ -72,7 +72,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    && env DEBIAN_FRONTEND=noninteractive apt-get install \
        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
        build-essential \
        curl \
        debhelper \
        devscripts \
        libsystemd-dev \
@@ -84,19 +83,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
        python3-venv \
        sqlite3 \
        libpq-dev \
        libicu-dev \
        pkg-config \
        xmlsec1

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /

# install dhvirtualenv. Update the apt cache again first, in case we got a
@@ -1,67 +1,39 @@
# syntax=docker/dockerfile:1

# Inherit from the official Synapse docker image
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
FROM matrixdotorg/synapse:$SYNAPSE_VERSION

# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
# Install deps
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && \
    DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
        redis-server nginx-light

FROM debian:bullseye-slim AS deps_base
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && \
    DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
        redis-server nginx-light
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install supervisor~=4.2

# Similarly, a base to copy the redis server from.
#
# The redis docker image has fewer dynamic libraries than the debian package,
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
FROM redis:6-bullseye AS redis_base
# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default

# now build the final image, based on the the regular Synapse docker image
FROM $FROM
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/

# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/

# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
# Expose nginx listener port
EXPOSE 8080/tcp

COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]

# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log

# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/

# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/

# Expose nginx listener port
EXPOSE 8080/tcp

# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]

# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh
@@ -191,7 +191,7 @@ If you need to build the image from a Synapse checkout, use the following `docker
 build` command from the repo's root:

```
DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```

You can choose to build a different docker image by changing the value of the `-f` flag to
@@ -241,4 +241,4 @@ healthcheck:

Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
@@ -7,31 +7,36 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse

ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION

FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
# first of all, we create a base image with a postgres server and database,
# which we can copy into the target image. For repeated rebuilds, this is
# much faster than apt installing postgres each time.
#
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).

# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data

# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
FROM postgres:13-bullseye AS postgres_base
# initialise the database cluster in /var/lib/postgresql
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password

# Configure a password and create a database for Synapse
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single

# now build the final image, based on the Synapse image.

FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data

# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
@@ -45,12 +45,7 @@ esac

if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
    # Specify the workers to test with
    # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
    # utilizing WORKERS=1 for backwards compatibility.
    # -n True if the length of string is non-zero.
    # -z True if the length of string is zero.
    if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
        export SYNAPSE_WORKER_TYPES="\
    export SYNAPSE_WORKER_TYPES="\
        event_persister, \
        event_persister, \
        background_worker, \
@@ -62,12 +57,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
        federation_reader, \
        federation_sender, \
        synchrotron, \
        client_reader, \
        appservice, \
        pusher"

    fi
    log "Workers requested: $SYNAPSE_WORKER_TYPES"
    # Improve startup times by using a launcher based on fork()
    export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
@@ -12,8 +12,6 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
url_preview_enabled: true
url_preview_ip_range_blacklist: []

## Registration ##

@@ -92,6 +90,8 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##

experimental_features:
  # Enable spaces support
  spaces_enabled: true
  # Enable history backfilling support
  msc2716_enabled: true
  # server-side support for partial state in /send_join responses
@@ -100,8 +100,8 @@ experimental_features:
  # client-side support for partial state in /send_join responses
  faster_joins: true
{% endif %}
  # Filtering /messages by relation type.
  msc3874_enabled: true
  # Enable jump to date endpoint
  msc3030_enabled: true

server_notices:
  system_mxid_localpart: _server
@@ -19,7 +19,7 @@ username=www-data
autorestart=true

[program:redis]
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
#   below. Leave empty for no workers.
#   below. Leave empty for no workers, or set to '*' for all possible workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
#   will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
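As an aside, a typical invocation exercising these variables might look like
the following (a sketch with illustrative values and paths, not taken from
this diff):

```bash
docker run -d --name synapse-workers \
    -e SYNAPSE_SERVER_NAME=example.com \
    -e SYNAPSE_REPORT_STATS=no \
    -e SYNAPSE_WORKER_TYPES="synchrotron,federation_sender" \
    -v /path/to/synapse/data:/data \
    matrixdotorg/synapse-workers
```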
@@ -39,7 +39,6 @@
|
||||
# continue to work if so.
|
||||
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
@@ -50,18 +49,13 @@ from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
|
||||
|
||||
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
|
||||
# Watching /_matrix/client needs a "client" listener
|
||||
# Watching /_matrix/federation needs a "federation" listener
|
||||
# Watching /_matrix/media and related needs a "media" listener
|
||||
# Stream Writers require "client" and "replication" listeners because they
|
||||
# have to attach by instance_map to the master process and have client endpoints.
|
||||
|
||||
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"pusher": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.pusher",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {},
|
||||
"shared_extra_conf": {"start_pushers": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"user_dir": {
|
||||
@@ -84,11 +78,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"^/_synapse/admin/v1/media/.*$",
|
||||
"^/_synapse/admin/v1/quarantine_media/.*$",
|
||||
],
|
||||
# The first configured media worker will run the media background jobs
|
||||
"shared_extra_conf": {
|
||||
"enable_media_repo": False,
|
||||
"media_instance_running_background_jobs": "media_repository1",
|
||||
},
|
||||
"shared_extra_conf": {"enable_media_repo": False},
|
||||
"worker_extra_conf": "enable_media_repo: true",
|
||||
},
|
||||
"appservice": {
|
||||
@@ -99,10 +89,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_sender": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.federation_sender",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {},
|
||||
"shared_extra_conf": {"send_federation": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"synchrotron": {
|
||||
@@ -117,35 +107,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"client_reader": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
|
||||
"^/_matrix/client/v1/rooms/.*/hierarchy$",
|
||||
"^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
|
||||
"^/_matrix/client/v1/rooms/.*/threads$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
|
||||
"^/_matrix/client/versions$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/register$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
|
||||
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_reader": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["federation"],
|
||||
@@ -164,7 +125,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"^/_matrix/federation/(v1|v2)/invite/",
|
||||
"^/_matrix/federation/(v1|v2)/query_auth/",
|
||||
"^/_matrix/federation/(v1|v2)/event_auth/",
|
||||
"^/_matrix/federation/v1/timestamp_to_event/",
|
||||
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
|
||||
"^/_matrix/federation/(v1|v2)/user/devices/",
|
||||
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
|
||||
@@ -211,54 +171,14 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"frontend_proxy": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.frontend_proxy",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"account_data": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(r0|v3|unstable)/.*/tags",
|
||||
"^/_matrix/client/(r0|v3|unstable)/.*/account_data",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"presence": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"receipts": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
|
||||
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"to_device": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"typing": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
"worker_extra_conf": (
|
||||
"worker_main_http_uri: http://127.0.0.1:%d"
|
||||
% (MAIN_PROCESS_HTTP_LISTENER_PORT,)
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -281,19 +201,24 @@ upstream {upstream_worker_type} {{
|
||||
|
||||
# Utility functions
|
||||
def log(txt: str) -> None:
|
||||
"""Log something to the stdout.
|
||||
|
||||
Args:
|
||||
txt: The text to log.
|
||||
"""
|
||||
print(txt)
|
||||
|
||||
|
||||
def error(txt: str) -> NoReturn:
|
||||
print(txt, file=sys.stderr)
|
||||
"""Log something and exit with an error code.
|
||||
|
||||
Args:
|
||||
txt: The text to log in error.
|
||||
"""
|
||||
log(txt)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
def flush_buffers() -> None:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
"""Generate a file from a template
|
||||
|
||||
@@ -322,14 +247,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
outfile.write(rendered)
|
||||
|
||||
|
||||
def add_worker_roles_to_shared_config(
|
||||
def add_sharding_to_shared_config(
|
||||
shared_config: dict,
|
||||
worker_type: str,
|
||||
worker_name: str,
|
||||
worker_port: int,
|
||||
) -> None:
|
||||
"""Given a dictionary representing a config file shared across all workers,
|
||||
append appropriate worker information to it for the current worker_type instance.
|
||||
append sharded worker information to it for the current worker_type instance.
|
||||
|
||||
Args:
|
||||
shared_config: The config dict that all worker instances share (after being converted to YAML)
|
||||
@@ -360,19 +285,9 @@ def add_worker_roles_to_shared_config(
|
||||
"port": worker_port,
|
||||
}
|
||||
|
||||
elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
|
||||
# Update the list of stream writers
|
||||
# It's convenient that the name of the worker type is the same as the stream to write
|
||||
shared_config.setdefault("stream_writers", {}).setdefault(
|
||||
worker_type, []
|
||||
).append(worker_name)
|
||||
|
||||
# Map of stream writer instance names to host/ports combos
|
||||
# For now, all stream writers need http replication ports
|
||||
instance_map[worker_name] = {
|
||||
"host": "localhost",
|
||||
"port": worker_port,
|
||||
}
|
||||
elif worker_type == "media_repository":
|
||||
# The first configured media worker will run the media background jobs
|
||||
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
|
||||
|
||||
|
||||
def generate_base_homeserver_config() -> None:
|
||||
@@ -384,7 +299,7 @@ def generate_base_homeserver_config() -> None:
|
||||
# start.py already does this for us, so just call that.
|
||||
# note that this script is copied in in the official, monolith dockerfile
|
||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
||||
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
|
||||
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
|
||||
|
||||
|
||||
def generate_worker_files(
|
||||
@@ -458,8 +373,8 @@ def generate_worker_files(
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
else:
|
||||
# Split type names by comma, ignoring whitespace.
|
||||
worker_types = [x.strip() for x in worker_types_env.split(",")]
|
||||
# Split type names by comma
|
||||
worker_types = worker_types_env.split(",")
|
||||
|
||||
# Create the worker configuration directory if it doesn't already exist
|
||||
os.makedirs("/conf/workers", exist_ok=True)
|
||||
@@ -478,11 +393,14 @@ def generate_worker_files(
|
||||
|
||||
# For each worker type specified by the user, create config values
|
||||
for worker_type in worker_types:
|
||||
worker_type = worker_type.strip()
|
||||
|
||||
worker_config = WORKERS_CONFIG.get(worker_type)
|
||||
if worker_config:
|
||||
worker_config = worker_config.copy()
|
||||
else:
|
||||
error(worker_type + " is an unknown worker type! Please fix!")
|
||||
log(worker_type + " is an unknown worker type! It will be ignored")
|
||||
continue
|
||||
|
||||
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
|
||||
worker_type_counter[worker_type] = new_worker_count
|
||||
@@ -501,11 +419,11 @@ def generate_worker_files(
|
||||
|
||||
# Check if more than one instance of this worker type has been specified
|
||||
worker_type_total_count = worker_types.count(worker_type)
|
||||
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_worker_roles_to_shared_config(
|
||||
shared_config, worker_type, worker_name, worker_port
|
||||
)
|
||||
if worker_type_total_count > 1:
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_sharding_to_shared_config(
|
||||
shared_config, worker_type, worker_name, worker_port
|
||||
)
|
||||
|
||||
# Enable the worker in supervisord
|
||||
worker_descriptors.append(worker_config)
|
||||
@@ -686,24 +604,14 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
        with open(mark_filepath, "w") as f:
            f.write("")

    # Lifted right out of start.py
    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
    else:
        log("Could not find %s, will not use" % (jemallocpath,))

    # Start supervisord, which will start Synapse, all of the configured worker
    # processes, redis, nginx etc. according to the config we created above.
    log("Starting supervisord")
    flush_buffers()
    os.execle(
    os.execl(
        "/usr/local/bin/supervisord",
        "supervisord",
        "-c",
        "/etc/supervisor/supervisord.conf",
        environ,
    )

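Side note on the `os.execle`/`os.execl` pair shown above: the two calls differ only in whether an environment mapping is passed as the final argument. A minimal sketch (the jemalloc path is an assumption for x86_64):

```python
import os

# os.execl(path, arg0, ...) replaces this process and inherits os.environ;
# os.execle(path, arg0, ..., env) does the same but installs `env` instead.
# Neither call returns on success.
env = dict(os.environ, LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2")
os.execle(
    "/usr/local/bin/supervisord",
    "supervisord",
    "-c",
    "/etc/supervisor/supervisord.conf",
    env,
)
```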
@@ -1,75 +0,0 @@
# syntax=docker/dockerfile:1
# This dockerfile builds an editable install of Synapse.
#
# Used by `complement.sh`. Not suitable for production use.

ARG PYTHON_VERSION=3.9

###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye

# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -qq && apt-get install -yqq \
    build-essential \
    libffi-dev \
    libjpeg-dev \
    libpq-dev \
    libssl-dev \
    libwebp-dev \
    libxml++2.6-dev \
    libxslt1-dev \
    openssl \
    zlib1g-dev \
    git \
    curl \
    gosu \
    libjpeg62-turbo \
    libpq5 \
    libwebp6 \
    xmlsec1 \
    libjemalloc2 \
    && rm -rf /var/lib/apt/lists/*
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


# Make a base copy of the editable source tree, so that we have something to
# install and build now — even though it's going to be covered up by a mount
# at runtime.
COPY synapse /editable-src/synapse/
COPY rust /editable-src/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/

RUN pip install poetry
RUN poetry config virtualenvs.create false
RUN cd /editable-src && poetry install --extras all

# Make copies of useful things for inspection:
# - the Rust module (must be copied to the editable source tree before startup)
# - poetry.lock is useful for checking if dependencies have changed.
RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
RUN cp /editable-src/poetry.lock /poetry.lock.bak


### Extra setup from original Dockerfile
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf

EXPOSE 8008/tcp 8009/tcp 8448/tcp

ENTRYPOINT ["/start.py"]

HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD curl -fSs http://localhost:8008/health || exit 1
@@ -13,19 +13,14 @@ import jinja2

# Utility functions
def log(txt: str) -> None:
    print(txt)
    print(txt, file=sys.stderr)


def error(txt: str) -> NoReturn:
    print(txt, file=sys.stderr)
    log(txt)
    sys.exit(2)


def flush_buffers() -> None:
    sys.stdout.flush()
    sys.stderr.flush()


def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
    """Generate a file from a template

@@ -136,10 +131,10 @@ def generate_config_from_template(

    if ownership is not None:
        log(f"Setting ownership on /data to {ownership}")
        subprocess.run(["chown", "-R", ownership, "/data"], check=True)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args = ["gosu", ownership] + args

    subprocess.run(args, check=True)
    subprocess.check_output(args)


def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -163,7 +158,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        log(f"Setting ownership on {data_dir} to {ownership}")
        subprocess.run(["chown", ownership, data_dir], check=True)
        subprocess.check_output(["chown", ownership, data_dir])

    # create a suitable log config from our template
    log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -190,7 +185,6 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
        "--open-private-ports",
    ]
    # log("running %s" % (args, ))
    flush_buffers()
    os.execv(sys.executable, args)


@@ -273,10 +267,8 @@ running with 'migrate_config'. See the README for more details.
    args = [sys.executable] + args
    if ownership is not None:
        args = ["gosu", ownership] + args
        flush_buffers()
        os.execve("/usr/sbin/gosu", args, environ)
    else:
        flush_buffers()
        os.execve(sys.executable, args, environ)

@@ -9,8 +9,6 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
  - [coturn TURN server](setup/turn/coturn.md)
  - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)

# Upgrading
@@ -71,7 +69,6 @@
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
  - [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
- [Monthly Active Users](usage/administration/monthly_active_users.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)

@@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API:
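For example, fetching the nonce from Python might look like the sketch below (the server address is a placeholder; the endpoint is the shared-secret registration admin API):

```python
import requests

# GET returns a one-time nonce that binds the subsequent registration request.
resp = requests.get("http://localhost:8008/_synapse/admin/v1/register")
nonce = resp.json()["nonce"]
print(nonce)
```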
@@ -46,24 +46,7 @@ As an example:
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type,
each separated by NULs.

Here is an easy way to generate the HMAC digest if you have Bash and OpenSSL:

```bash
# Update these values and then paste this code block into a bash terminal
nonce='thisisanonce'
username='pepper_roni'
password='pizza'
admin='admin'
secret='shared_secret'

printf '%s\0%s\0%s\0%s' "$nonce" "$username" "$password" "$admin" |
  openssl sha1 -hmac "$secret" |
  awk '{print $2}'
```

For an example of generation in Python:
each separated by NULs. For an example of generation in Python:

```python
import hmac, hashlib
@@ -87,4 +70,4 @@ def generate_mac(nonce, user, password, admin=False, user_type=None):
        mac.update(user_type.encode('utf8'))

    return mac.hexdigest()
```
```
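As a sketch of tying it together (placeholder credentials; assumes `generate_mac` from the block above is in scope), the computed MAC is submitted alongside the registration details:

```python
import requests

base = "http://localhost:8008/_synapse/admin/v1/register"  # placeholder host

nonce = requests.get(base).json()["nonce"]
mac = generate_mac(nonce, "pepper_roni", "pizza", admin=True)

resp = requests.post(
    base,
    json={
        "nonce": nonce,
        "username": "pepper_roni",
        "password": "pizza",
        "admin": True,
        "mac": mac,
    },
)
print(resp.json())
```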
@@ -302,8 +302,6 @@ The following fields are possible in the JSON response body:
* `state_events` - Total number of state_events of a room. Complexity of the room.
* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space.
  If the room does not define a type, the value will be `null`.
* `forgotten` - Whether all local users have
  [forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room.

The API is:

@@ -332,13 +330,10 @@ A response body like the following is returned:
  "guest_access": null,
  "history_visibility": "shared",
  "state_events": 93534,
  "room_type": "m.space",
  "forgotten": false
  "room_type": "m.space"
}
```

_Changed in Synapse 1.66:_ Added the `forgotten` key to the response body.

# Room Members API

The Room Members admin API allows server admins to get a list of all members of a room.
@@ -393,151 +388,6 @@ A response body like the following is returned:
}
```

# Room Messages API

The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).

This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to fetch messages from.

The following query parameters are available:

* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
  or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.

**Response**

The following fields are possible in the JSON response body:

* `chunk` - A list of room events. The order depends on the dir parameter.
  Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
  If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.

**Example**

For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

```json
{
  "chunk": [
    {
      "content": {
        "body": "This is an example text message",
        "format": "org.matrix.custom.html",
        "formatted_body": "<b>This is an example text message</b>",
        "msgtype": "m.text"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "name": "The room name"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "state_key": "",
      "type": "m.room.name",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "body": "Gangnam Style",
        "info": {
          "duration": 2140786,
          "h": 320,
          "mimetype": "video/mp4",
          "size": 1563685,
          "thumbnail_info": {
            "h": 300,
            "mimetype": "image/jpeg",
            "size": 46144,
            "w": 300
          },
          "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
          "w": 480
        },
        "msgtype": "m.video",
        "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    }
  ],
  "end": "t47409-4357353_219380_26003_2265",
  "start": "t47429-4392820_219380_26003_2265"
}
```

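A short pagination sketch against this endpoint (the access token, room ID and initial token are placeholders): keep requesting with the returned `end` token until none is returned.

```python
import requests

base = "http://localhost:8008/_synapse/admin/v1"      # placeholder host
headers = {"Authorization": "Bearer <access_token>"}  # placeholder token
room_id = "!636q39766251:example.com"

token = "<from_token>"  # e.g. a prev_batch token from /sync
while token:
    body = requests.get(
        f"{base}/rooms/{room_id}/messages",
        headers=headers,
        params={"from": token, "limit": 10, "dir": "f"},
    ).json()
    for event in body.get("chunk", []):
        print(event["type"], event["event_id"])
    token = body.get("end")  # omitted when no further events are available
```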
# Room Timestamp to Event API

The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).

Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to check.

The following query parameters are available:

* `ts` - a timestamp in milliseconds where we will find the closest event in
  the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
  given timestamp. Defaults to `f`.

**Response**

* `event_id` - converted from timestamp

# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

@@ -37,13 +37,11 @@ It returns a JSON body like the following:
    "is_guest": 0,
    "admin": 0,
    "deactivated": 0,
    "erased": false,
    "shadow_banned": 0,
    "creation_ts": 1560432506,
    "appservice_id": null,
    "consent_server_notice_sent": null,
    "consent_version": null,
    "consent_ts": null,
    "external_ids": [
        {
            "auth_provider": "<provider1>",
@@ -168,7 +166,6 @@ A response body like the following is returned:
        "admin": 0,
        "user_type": null,
        "deactivated": 0,
        "erased": false,
        "shadow_banned": 0,
        "displayname": "<User One>",
        "avatar_url": null,
@@ -179,7 +176,6 @@ A response body like the following is returned:
        "admin": 1,
        "user_type": null,
        "deactivated": 0,
        "erased": false,
        "shadow_banned": 0,
        "displayname": "<User Two>",
        "avatar_url": "<avatar_url>",
@@ -250,7 +246,6 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
  This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
- `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -369,7 +364,6 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)

## Reset password

@@ -759,7 +753,6 @@ A response body like the following is returned:
      "device_id": "QBUAZIFURK",
      "display_name": "android",
      "last_seen_ip": "1.2.3.4",
      "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
      "last_seen_ts": 1474491775024,
      "user_id": "<user_id>"
    },
@@ -767,7 +760,6 @@ A response body like the following is returned:
      "device_id": "AUIECTSRND",
      "display_name": "ios",
      "last_seen_ip": "1.2.3.5",
      "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
      "last_seen_ts": 1474491775025,
      "user_id": "<user_id>"
    }
@@ -794,8 +786,6 @@ The following fields are returned in the JSON response body:
  Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -847,7 +837,6 @@ A response body like the following is returned:
  "device_id": "<device_id>",
  "display_name": "android",
  "last_seen_ip": "1.2.3.4",
  "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
  "last_seen_ts": 1474491775024,
  "user_id": "<user_id>"
}
@@ -869,8 +858,6 @@ The following fields are returned in the JSON response body:
  Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_user_agent` - The user agent of the device when it was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -1159,80 +1146,3 @@ GET /_synapse/admin/v1/username_available?username=$localpart

The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

### Find a user based on their ID in an auth provider

The API is:

```
GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
```

When a user matches the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:

```json
{
    "user_id": "@hello:example.org"
}
```

**Parameters**

The following parameters should be set in the URL:

- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.

The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.

**Errors**

Returns a `404` HTTP status code if no user was found, with a response body like this:

```json
{
    "errcode":"M_NOT_FOUND",
    "error":"User not found"
}
```

_Added in Synapse 1.68.0._

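Since `external_id` often contains URL-unsafe characters, a quick sketch of calling this endpoint with percent-encoding (the token, provider ID and external ID are placeholders):

```python
import requests
from urllib.parse import quote

headers = {"Authorization": "Bearer <access_token>"}  # placeholder token
provider = "oidc-myprovider"                          # hypothetical provider ID
external_id = quote("user@idp.example.org", safe="")  # encode '/', ':', '@', etc.

resp = requests.get(
    f"http://localhost:8008/_synapse/admin/v1/auth_providers/{provider}/users/{external_id}",
    headers=headers,
)
print(resp.status_code, resp.json())
```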
### Find a user based on their Third Party ID (ThreePID or 3PID)

The API is:

```
GET /_synapse/admin/v1/threepid/$medium/users/$address
```

When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:

```json
{
    "user_id": "@hello:example.org"
}
```

**Parameters**

The following parameters should be set in the URL:

- `medium` - Kind of third-party ID, either `email` or `msisdn`.
- `address` - Value of the third-party ID.

The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.

**Errors**

Returns a `404` HTTP status code if no user was found, with a response body like this:

```json
{
    "errcode":"M_NOT_FOUND",
    "error":"User not found"
}
```

_Added in Synapse 1.72.0._

@@ -34,45 +34,13 @@ the process of indexing it).
## Chain Cover Index

Synapse computes auth chain differences by pre-computing a "chain cover" index
for the auth chain in a room, allowing us to efficiently make reachability queries
like "is event `A` in the auth chain of event `B`?". We could do this with an index
that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this
would be prohibitively large, scaling poorly as the room accumulates more state
events.
for the auth chain in a room, allowing efficient reachability queries like "is
event A in the auth chain of event B". This is done by assigning every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
is in the auth chain of `B`) if and only if either:

Instead, we break down the graph into *chains*. A chain is a subset of a DAG
with the following property: for any pair of events `E` and `F` in the chain,
the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be
linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain
is given a *sequence number* local to that chain. The oldest event `E` in the
chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has
sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has
sequence number 3; and so on.

Synapse ensures that each persisted event belongs to exactly one chain, and
tracks how the chains are connected to one another. This allows us to
efficiently answer reachability queries. Doing so uses less storage than
tracking reachability on an event-by-event basis, particularly when we have
fewer and longer chains. See

> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944).
> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598.

for the original idea or

> Y. Chen, Y. Chen, [An efficient algorithm for answering graph
> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498),
> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008,
> pp. 893–902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).)

for a more modern take.

In practical terms, the chain cover assigns every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links*
between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B`
(i.e. `A` is in the auth chain of `B`) if and only if either:

1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
   sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
   `L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
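A minimal sketch of the two conditions above in Python (the link-table layout here is illustrative, not Synapse's actual schema):

```python
from typing import Dict, Tuple

# links[(b_chain, a_chain)] = (start_seq_no, end_seq_no) represents a link L
# from chain b_chain to chain a_chain, as in the `(5,3) -> (2,4)` example.
links: Dict[Tuple[int, int], Tuple[int, int]] = {(5, 2): (3, 4)}

def in_auth_chain(a: Tuple[int, int], b: Tuple[int, int]) -> bool:
    """Return True if event A = (chain, seq) is in the auth chain of event B."""
    a_chain, a_seq = a
    b_chain, b_seq = b
    # Condition 1: same chain, and A has the smaller sequence number.
    if a_chain == b_chain:
        return a_seq < b_seq
    # Condition 2: a link L between B's chain and A's chain such that
    # L.start_seq_no <= B.seq_no and A.seq_no <= L.end_seq_no.
    link = links.get((b_chain, a_chain))
    if link is None:
        return False
    start_seq_no, end_seq_no = link
    return start_seq_no <= b_seq and a_seq <= end_seq_no

print(in_auth_chain((2, 4), (5, 3)))  # True: via the example link (5,3) -> (2,4)
```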
@@ -81,9 +49,8 @@ There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains. This trades-off extra storage
in order to save CPU cycles and DB queries.
would not be stored. Synapse uses the former implementations so that it doesn't
need to recurse to test reachability between chains.

### Example

@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.
Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.


Policy
@@ -17,14 +17,6 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).

A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context
-------
@@ -39,15 +31,3 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.

For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bumps its minimum supported Rust versions frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.

On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.
@@ -24,15 +24,10 @@ The code of Synapse is written in Python 3. To do pretty much anything, you'll n

Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.

Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.

The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).


# 3. Get the source.

@@ -67,8 +62,6 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.

Synapse requires Poetry version 1.2.0 or later.

Next, open a terminal and install dependencies as follows:

```sh
@@ -119,11 +112,6 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

When changes are made to any Rust code then you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
is quicker than `poetry install`, so is recommended when making frequent
changes to the Rust code.


# 8. Test, test, test!
<a name="test-test-test"></a>
@@ -169,12 +157,6 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```

You can run unit tests in parallel by specifying the `-jX` argument to `trial`, where `X` is the number of parallel runners you want. To use 4 CPU cores, you would run them like:

```sh
poetry run trial -j4 tests
```

If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:

@@ -211,7 +193,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```

Note that the database file is cleared at the beginning of each test run. Thus it
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.

@@ -326,12 +308,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data

- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
- If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
  types you wish to test. A simple comma-delimited string containing the worker types
  defined from the `WORKERS_CONFIG` template in
  [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
  A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
  See the [worker documentation](../workers.md) for additional information on workers.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
```sh
@@ -341,7 +317,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`

If you want to format the output of the tests the same way as it looks in CI,
install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).

You can then use this incantation to format the tests appropriately:

@@ -398,7 +374,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
formatting, and must end with a full stop (.) or an exclamation mark (!) for
formatting, and should end with a full stop (.) or an exclamation mark (!) for
consistency.

Adding credits to the changelog is encouraged; we value your
@@ -191,28 +191,3 @@ There are three separate aspects to this:
  flavour will be accepted by SQLite 3.22, but will give a column whose
  default value is the **string** `"FALSE"` - which, when cast back to a boolean
  in Python, evaluates to `True`.


## `event_id` global uniqueness

`event_id`s can be considered globally unique, although there has been a lot of
debate on this topic in places like
[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01). There are several places in Synapse
and even in the Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.

When scoping `event_id` in a database schema, it is often nice to accompany it
with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
REFERENCES rooms(room_id)`) which makes flexible lookups easy. For example it
makes it very easy to find and clean up everything in a room when it needs to be
purged (no need to use a sub-`select` query or join from the `events` table).

A note on collisions: In room versions `1` and `2` it's possible to end up with
two events with the same `event_id` (in the same or different rooms). After room
version `3`, that can only happen with a hash collision, which we basically hope
will never happen (SHA256 has a massive key space).

@@ -126,23 +126,6 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```

## ...delete everything and start over from scratch?

```shell
# Stop the current virtualenv if active
$ deactivate

# Remove all of the files from the current environment.
# Don't worry, even though it says "all", this will only
# remove the Poetry virtualenvs for the current project.
$ poetry env remove --all

# Reactivate Poetry shell to create the virtualenv again
$ poetry shell
# Install everything again
$ poetry install --extras all
```

## ...run a command in the `poetry` virtualenv?

Use `poetry run cmd args` when you need the python virtualenv context.
@@ -260,11 +243,14 @@ doesn't require poetry. (It's what we use in CI too). However, you could try

## Check the version of poetry with `poetry --version`.

The minimum version of poetry supported by Synapse is 1.2.
At the time of writing, the 1.2 series is beta only. We have seen some examples
where the lockfiles generated by 1.2 prereleases aren't interpreted correctly
by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical
[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain
[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775).

It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
poetry-core`.
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.

## Clear caches: `poetry cache clear --all pypi`.

@@ -273,16 +259,6 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.

## Remove outdated egg-info

Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
install.

This stores some cached information about dependencies and often conflicts with
letting Poetry do the right thing.


## Try `--verbose` or `--dry-run` arguments.

These are sometimes useful to see what poetry's internal logic is.

@@ -8,8 +8,7 @@ and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental. There are known bugs which may cause database corruption.
Proceed with caution.**
experimental.**

A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after

@@ -7,30 +7,17 @@

1. Enable Synapse metrics:

   In `homeserver.yaml`, make sure `enable_metrics` is
   set to `True`.

1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
   collect data:

   There are two methods of enabling the metrics endpoint in Synapse.
   There are two methods of enabling metrics in Synapse.

   The first serves the metrics as a part of the usual web server and
   can be enabled by adding the `metrics` resource to the existing
   listener, as in this example:
   can be enabled by adding the \"metrics\" resource to the existing
   listener as such:

   ```yaml
   listeners:
     - port: 8008
       tls: false
       type: http
       x_forwarded: true
       bind_addresses: ['::1', '127.0.0.1']

       resources:
         # added "metrics" in this line
         - names: [client, federation, metrics]
           compress: false
       resources:
         - names:
           - client
           - metrics
   ```

   This provides a simple way of adding metrics to your Synapse
@@ -44,26 +31,19 @@
   to just internal networks easier. The served metrics are available
   over HTTP only, and will be available at `/_synapse/metrics`.

   Add a new listener to homeserver.yaml as in this example:
   Add a new listener to homeserver.yaml:

   ```yaml
   listeners:
     - port: 8008
       tls: false
       type: http
       x_forwarded: true
       bind_addresses: ['::1', '127.0.0.1']

       resources:
         - names: [client, federation]
           compress: false

     # beginning of the new metrics listener
     - port: 9000
       type: metrics
       bind_addresses: ['::1', '127.0.0.1']
   listeners:
     - type: metrics
       port: 9000
       bind_addresses:
         - '0.0.0.0'
   ```

   For both options, you will need to ensure that `enable_metrics` is
   set to `True`.

1. Restart Synapse.

1. Add a Prometheus target for Synapse.
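To sanity-check either listener setup, a quick sketch fetching the served metrics over HTTP (port 9000 assumes the dedicated metrics listener above; use the main listener's port for the resource-based variant):

```python
import requests

metrics = requests.get("http://localhost:9000/_synapse/metrics").text
for line in metrics.splitlines()[:10]:  # print a sample of the exposed series
    print(line)
```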
@@ -152,8 +132,6 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
**The old names will be disabled by default in Synapse v1.71.0 and removed
altogether in Synapse v1.73.0.**

| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -165,13 +143,6 @@ altogether in Synapse v1.73.0.**
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
| synapse_util_caches_cache | synapse_util_caches_cache:total |
| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -209,9 +180,6 @@ altogether in Synapse v1.73.0.**
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
| synapse_admin_mau_current | synapse_admin_mau:current |
| synapse_admin_mau_max | synapse_admin_mau:max |
| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |

Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
@@ -290,7 +258,7 @@ Standard Metric Names

As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
the units have been changed to seconds, from milliseconds.
the units have been changed to seconds, from miliseconds.

| New name | Old name |
| ---------------------------------------- | --------------------------------- |

@@ -263,7 +263,7 @@ class MyAuthProvider:
        return None

        if self.credentials.get(username) == login_dict.get("my_field"):
            return (self.api.get_qualified_user_id(username), None)
            return self.api.get_qualified_user_id(username)

    async def check_pass(
        self,
@@ -280,5 +280,5 @@ class MyAuthProvider:
        return None

        if self.credentials.get(username) == login_dict.get("password"):
            return (self.api.get_qualified_user_id(username), None)
            return self.api.get_qualified_user_id(username)
```

@@ -49,13 +49,6 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.

## OIDC Back-Channel Logout

Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.

This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session.
This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`

## Sample configs

Here are a few configs for providers that should work with Synapse.
@@ -130,9 +123,6 @@ oidc_providers:

[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.

Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse, so that Synapse users get logged out when they log out from Keycloak.
This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.

Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.

1. Click `Clients` in the sidebar and click `Create`
@@ -154,8 +144,6 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
| Backchannel Logout Session Required (optional) | `On` |

5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -179,18 +167,14 @@ oidc_providers:
    config:
      localpart_template: "{{ user.preferred_username }}"
      display_name_template: "{{ user.name }}"
      backchannel_logout_enabled: true # Optional
```

### Auth0

[Auth0][auth0] is a hosted SaaS IdP solution.

1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
3. Add a rule with any name to add the `preferred_username` claim.
   (See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.)

3. Add a rule to add the `preferred_username` claim.
   <details>
    <summary>Code sample</summary>

@@ -350,12 +334,11 @@ oidc_providers:
   issuer: "https://accounts.google.com/"
   client_id: "your-client-id" # TO BE FILLED
   client_secret: "your-client-secret" # TO BE FILLED
   scopes: ["openid", "profile", "email"] # email is optional, read below
   scopes: ["openid", "profile"]
   user_mapping_provider:
     config:
       localpart_template: "{{ user.given_name|lower }}"
       display_name_template: "{{ user.name }}"
       email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
   public baseurl]/_synapse/client/oidc/callback`.
@@ -438,7 +421,7 @@ Synapse config:
   user_mapping_provider:
     config:
       display_name_template: "{{ user.name }}"
       email_template: "{{ user.email }}"
       email_template: "{{ '{{ user.email }}' }}"
```

Relevant documents:
@@ -590,44 +573,3 @@ oidc_providers:
        display_name_template: "{{ user.first_name }} {{ user.last_name }}"
        email_template: "{{ user.email }}"
```

### Mastodon

[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.

The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using curl.

This example assumes that:
* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
* Synapse will be registered as an app named `my_synapse_app`.

Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
```sh
curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
```

You should receive a response similar to the following. Make sure to save it.
```json
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```

As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:

```yaml
oidc_providers:
  - idp_id: my_mastodon
    idp_name: "Mastodon Instance Example"
    discover: false
    issuer: "https://your.mastodon.instance.url/@admin"
    client_id: "someclientid_123"
    client_secret: "someclientsecret_123"
    authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
    token_endpoint: "https://your.mastodon.instance.url/oauth/token"
    userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
    scopes: ["read"]
    user_mapping_provider:
      config:
        subject_claim: "id"
```

Note that the fields `client_id` and `client_secret` are taken from the curl response above.

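For convenience, the same application-registration request sketched in Python (same placeholder URLs as the curl example above):

```python
import requests

resp = requests.post(
    "https://your.mastodon.instance.url/api/v1/apps",
    data={
        "client_name": "my_synapse_app",
        "redirect_uris": "https://[synapse_public_baseurl]/_synapse/client/oidc/callback",
    },
)
# Save the returned client_id and client_secret for the Synapse config above.
print(resp.json())
```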
@@ -1,7 +1,6 @@
# Using Postgres

The minimum supported version of PostgreSQL is determined by the [Dependency
Deprecation Policy](deprecation_policy.md).
Synapse supports PostgreSQL versions 10 or later.

## Install postgres client libraries


@@ -45,10 +45,6 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)

Optionally, you can also set
[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
so that the server extracts and re-uses the same request ID format that the
reverse proxy is using.

## Reverse-proxy configuration examples

@@ -79,9 +75,6 @@ server {
        # Nginx by default only allows file uploads up to 1M in size
        # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
        client_max_body_size 50M;

        # Synapse responses may be chunked, which is an HTTP/1.1 feature.
        proxy_http_version 1.1;
    }
}
```

@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

version: 1
@@ -84,9 +84,7 @@ file when you upgrade the Debian package to a later version.
|
||||
|
||||
##### Downstream Debian packages
|
||||
|
||||
Andrej Shadura maintains a
|
||||
[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
|
||||
the Debian repositories.
|
||||
Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
|
||||
For `bookworm` and `sid`, it can be installed simply with:
|
||||
|
||||
```sh
|
||||
@@ -102,27 +100,23 @@ for information on how to use backports.
|
||||
##### Downstream Ubuntu packages
|
||||
|
||||
We do not recommend using the packages in the default Ubuntu repository
|
||||
at this time, as they are [old and suffer from known security vulnerabilities](
|
||||
https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
|
||||
).
|
||||
at this time, as they are old and suffer from known security vulnerabilities.
|
||||
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
|
||||
|
||||
#### Fedora
|
||||
|
||||
Synapse is in the Fedora repositories as
|
||||
[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
|
||||
Synapse is in the Fedora repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
sudo dnf install matrix-synapse
|
||||
```
|
||||
|
||||
Additionally, Oleg Girko provides Fedora RPMs at
|
||||
Oleg Girko provides Fedora RPMs at
|
||||
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
|
||||
|
||||
#### OpenSUSE
|
||||
|
||||
Synapse is in the OpenSUSE repositories as
|
||||
[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
|
||||
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
sudo zypper install matrix-synapse
|
||||
@@ -157,8 +151,7 @@ sudo pip install py-bcrypt
|
||||
|
||||
#### Void Linux
|
||||
|
||||
Synapse can be found in the void repositories as
|
||||
['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
|
||||
Synapse can be found in the void repositories as 'synapse':
|
||||
|
||||
```sh
|
||||
xbps-install -Su
|
||||
@@ -188,7 +181,7 @@ doas pkg_add synapse
|
||||
#### NixOS
|
||||
|
||||
Robin Lambertz has packaged Synapse for NixOS at:
|
||||
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/synapse.nix>
|
||||
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
|
||||
|
||||
|
||||
### Installing as a Python module from PyPI
|
||||
@@ -203,10 +196,6 @@ System requirements:
|
||||
- Python 3.7 or later, up to Python 3.10.
|
||||
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||
|
||||
If building on an uncommon architecture for which pre-built wheels are
|
||||
unavailable, you will need to have a recent Rust compiler installed. The easiest
|
||||
way of installing the latest version is to use [rustup](https://rustup.rs/).

To install the Synapse homeserver run:

```sh
@@ -278,7 +267,7 @@ Installing prerequisites on Ubuntu or Debian:

```sh
sudo apt install build-essential python3-dev libffi-dev \
                     python3-pip python3-setuptools sqlite3 \
                     libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev
                     libssl-dev virtualenv libjpeg-dev libxslt1-dev
```

##### ArchLinux
@@ -287,7 +276,7 @@ Installing prerequisites on ArchLinux:

```sh
sudo pacman -S base-devel python python-pip \
               python-setuptools python-virtualenv sqlite3 icu
               python-setuptools python-virtualenv sqlite3
```

##### CentOS/Fedora
@@ -297,8 +286,7 @@ Installing prerequisites on CentOS or Fedora Linux:

```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                 libwebp-devel libxml2-devel libxslt-devel libpq-devel \
                 python3-virtualenv libffi-devel openssl-devel python3-devel \
                 libicu-devel
                 python3-virtualenv libffi-devel openssl-devel python3-devel
sudo dnf groupinstall "Development Tools"
```

@@ -311,14 +299,9 @@ You may need to install the latest Xcode developer tools:
xcode-select --install
```

Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) for them.

You may need to install icu, and make the icu binaries and libraries accessible.
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.
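
With Homebrew this might look like the following sketch; the keg-only path handling is an assumption, so defer to the PyICU instructions if they differ:

```sh
# Install ICU and expose the keg-only binaries/libraries to the PyICU build
brew install icu4c
export PATH="$(brew --prefix icu4c)/bin:$PATH"
export PKG_CONFIG_PATH="$(brew --prefix icu4c)/lib/pkgconfig:$PKG_CONFIG_PATH"
```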

On ARM-based Macs you may also need to install libjpeg and libpq:
On ARM-based Macs you may need to explicitly install libjpeg, which is a Pillow dependency. You can use Homebrew (https://brew.sh):
```sh
brew install jpeg libpq
brew install jpeg
```

On macOS Catalina (10.15) you may need to explicitly install OpenSSL
@@ -337,8 +320,7 @@ Installing prerequisites on openSUSE:

```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
               python-devel libffi-devel libopenssl-devel libjpeg62-devel \
               libicu-devel
               python-devel libffi-devel libopenssl-devel libjpeg62-devel
```

##### OpenBSD
@@ -524,13 +506,9 @@ email will be disabled.

### Registering a user

One way to create a new user is to do so from a client like
[Element](https://element.io/). This requires registration to be enabled via
the
[`enable_registration`](../usage/configuration/config_documentation.md#enable_registration)
setting.
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively, you can create new users from the command line. This can be done as follows:
Alternatively, you can do so from the command line. This can be done as follows:

1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
   installed via a prebuilt package, `register_new_matrix_user` should already be
@@ -542,7 +520,7 @@ Alternatively, you can create new users from the command line. This can be done
   ```
2. Run the following command:
   ```sh
   register_new_matrix_user -c homeserver.yaml
   register_new_matrix_user -c homeserver.yaml http://localhost:8008
   ```

This will prompt you to add details for the new user, and will then connect to
@@ -555,13 +533,12 @@ Make admin [no]:
Success!
```

This process uses a setting
[`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret),
which is shared between Synapse itself and the `register_new_matrix_user`
script. It doesn't matter what it is (a random value is generated by
`--generate-config`), but it should be kept secret, as anyone with knowledge of
it can register users, including admin accounts, on your server even if
`enable_registration` is `false`.
This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.
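
As a hedged sketch of how such a secret might be generated and set (editing `homeserver.yaml` by hand is equally fine; appending assumes the key is not already present):

```sh
# Generate a random shared secret and add it to homeserver.yaml
secret=$(pwgen -s 64 1)
echo "registration_shared_secret: \"$secret\"" >> homeserver.yaml
```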

### Setting up a TURN server


@@ -1,188 +0,0 @@
# coturn TURN server

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian and Ubuntu based distributions

Just install the debian package:

```sh
sudo apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   sudo make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure `coturn` to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, `coturn` can be configured to write to a
   logfile - check the example config file supplied with `coturn`.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
   relay); an example set of firewall rules is sketched after this list.

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure `coturn` to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `coturn` to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     sudo systemctl restart coturn
     ```

   * If you built from source:

     ```sh
     /usr/local/bin/turnserver -o
     ```
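
As a sketch of the firewall step above, assuming a host managed with `ufw` (substitute your own firewall tool, and adjust the ports if you changed the defaults):

```sh
# Allow TURN signalling (TCP and UDP on 3478/5349) and the UDP relay range
sudo ufw allow 3478/tcp
sudo ufw allow 3478/udp
sudo ufw allow 5349/tcp
sudo ufw allow 5349/udp
sudo ufw allow 49152:65535/udp
```
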
@@ -1,170 +0,0 @@
# eturnal TURN server

The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
(which implements the TURN REST API).

## `eturnal` setup

### Initial installation

The `eturnal` TURN server implementation is available from a variety of sources
such as native package managers, binary packages, installation from source or a
[container image](https://eturnal.net/documentation/code/docker.html). They are
all described [here](https://github.com/processone/eturnal#installation).

Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
are available as well.

### Configuration

After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
at `/etc/eturnal.yml` (and, if not found there, a fallback at
`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
format. The file contains further explanations.

Here are some hints on how to configure `eturnal` on your [host machine](https://github.com/processone/eturnal#configuration)
or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
You may also take a deeper dive into the [reference documentation](https://eturnal.net/documentation/).

`eturnal` runs out of the box with the default configuration. To enable TURN and
to integrate it with your homeserver, some aspects of `eturnal`'s default configuration file
must be edited:

1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
   and eturnal's shared `secret` for authentication

   Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
   configuration file:

   ```yaml
   secret: "long-and-cryptic"     # Shared secret, CHANGE THIS.
   ```

   One way to generate a `secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

1. Public IP address

   If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
   however, it may also be configured by uncommenting and adjusting this line, so
   `eturnal` advertises that address to connecting clients:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `eturnal` to advertise each available address:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. Logging

   If `eturnal` was started by systemd, log files are written into the
   `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
   instead, the `log_dir` option can be set to `stdout` in the configuration file.

1. Security considerations

   Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):

   ```yaml
   ## Reject TURN relaying from/to the following addresses/networks:
   blacklist:            # This is the default blacklist.
       - "127.0.0.0/8"   # IPv4 loopback.
       - "::1"           # IPv6 loopback.
       - recommended     # Expands to a number of networks recommended to be
                         # blocked, but includes private networks. Those
                         # would have to be 'whitelist'ed if eturnal serves
                         # local clients/peers within such networks.
   ```

   To whitelist IP addresses or specific (private) networks, you need to **add** a
   whitelist part into the configuration file, e.g.:

   ```yaml
   whitelist:
       - "192.168.0.0/16"
       - "203.0.113.113"
       - "2001:db8::/64"
   ```

   The more specific, the better.

1. TURNS (TURN via TLS/DTLS)

   Also consider supporting TLS/DTLS. To do this, adjust the following settings
   in the `eturnal.yml` configuration file (the TLS parts must no longer be commented out):

   ```yaml
   listen:
       - ip: "::"
         port: 3478
         transport: udp
       - ip: "::"
         port: 3478
         transport: tcp
       - ip: "::"
         port: 5349
         transport: tls

   ## TLS certificate/key files (must be readable by 'eturnal' user!):
   tls_crt_file: /etc/eturnal/tls/crt.pem
   tls_key_file: /etc/eturnal/tls/key.pem
   ```

   In this case, replace the `turn:` schemes in homeserver's `turn_uris` settings
   with `turns:`. More is described [here](../../usage/configuration/config_documentation.md#turn_uris).

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Firewall

   Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP relay).

1. Reloading/restarting `eturnal`

   Changes in the configuration file require `eturnal` to reload or restart;
   this can be achieved by:

   ```sh
   eturnalctl reload
   ```

   `eturnal` performs a configuration check before actually reloading or
   restarting and provides hints if something is not configured correctly.

### eturnalctl operations script

`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
which can be called e.g. to check whether the service is up, to restart the service,
to query how many active sessions exist, to change logging behaviour and so on.

Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly: e.g. `/opt/eturnal/bin/eturnalctl info`
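
A few illustrative `eturnalctl` calls (treat the exact subcommand set as an assumption and check the operations documentation linked above):

```sh
eturnalctl info             # show daemon status and statistics
eturnalctl sessions         # list currently active TURN sessions
eturnalctl loglevel debug   # raise log verbosity at runtime
```
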
@@ -22,7 +22,7 @@ choose their own username.
In the first case - where users are automatically allocated a Matrix ID - it is
the responsibility of the mapping provider to normalise the SSO attributes and
map them to a valid Matrix ID. The [specification for Matrix
IDs](https://spec.matrix.org/latest/appendices/#user-identifiers) has some
IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some
information about what is considered valid.

If the mapping provider does not assign a Matrix ID, then Synapse will
@@ -37,10 +37,9 @@ as Synapse). The Synapse config is then modified to point to the mapping provide
## OpenID Mapping Providers

The OpenID mapping provider can be customized by editing the
[`oidc_providers.user_mapping_provider.module`](usage/configuration/config_documentation.md#oidc_providers)
config option.
`oidc_config.user_mapping_provider.module` config option.

`oidc_providers.user_mapping_provider.config` allows you to provide custom
`oidc_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
@@ -59,7 +58,7 @@ A custom mapping provider must specify the following methods:
  - This method should have the `@staticmethod` decoration.
  - Arguments:
    - `config` - A `dict` representing the parsed content of the
      `oidc_providers.user_mapping_provider.config` homeserver config option.
      `oidc_config.user_mapping_provider.config` homeserver config option.
      Runs on homeserver startup. Providers should extract and validate
      any option values they need here.
  - Whatever is returned will be passed back to the user mapping provider module's
@@ -73,8 +72,8 @@ A custom mapping provider must specify the following methods:
* `async def map_user_attributes(self, userinfo, token, failures)`
  - This method must be async.
  - Arguments:
    - `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo)
      object to extract user information from.
    - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
      information from.
    - `token` - A dictionary which includes information necessary to make
      further requests to the OpenID provider.
    - `failures` - An `int` that represents the amount of times the returned
@@ -91,13 +90,7 @@ A custom mapping provider must specify the following methods:
      `None`, the user is prompted to pick their own username. This is only used
      during a user's first login. Once a localpart has been associated with a
      remote user ID (see `get_remote_user_id`) it cannot be updated.
    - `confirm_localpart`: A boolean. If set to `True`, when a `localpart`
      string is returned from this method, Synapse will prompt the user to
      either accept this localpart or pick their own username. Otherwise this
      option has no effect. If omitted, defaults to `False`.
    - `display_name`: An optional string, the display name for the user.
    - `emails`: A list of strings, the email address(es) to associate with
      this user. If omitted, defaults to an empty list.
    - `displayname`: An optional string, the display name for the user.
* `async def get_extra_attributes(self, userinfo, token)`
  - This method must be async.
  - Arguments:
@@ -109,7 +102,7 @@ A custom mapping provider must specify the following methods:
    will be returned as part of the response during a successful login.

    Note that care should be taken to not overwrite any of the parameters
    usually returned as part of the [login response](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login).
    usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).

### Default OpenID Mapping Provider

@@ -120,8 +113,7 @@ specified in the config. It is located at
## SAML Mapping Providers

The SAML mapping provider can be customized by editing the
[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
config option.
`saml2_config.user_mapping_provider.module` config option.

`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for

@@ -1,8 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
@@ -5,8 +5,6 @@ worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 8083

@@ -1,14 +0,0 @@
worker_app: synapse.app.media_repository
worker_name: media_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8085
    resources:
      - names: [media]

worker_log_config: /etc/matrix-synapse/media-worker-log.yaml
@@ -1,8 +0,0 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
@@ -9,7 +9,7 @@ in, allowing them to specify custom templates:

```yaml
templates:
  custom_template_directory: /path/to/custom/templates/
  custom_templates_directory: /path/to/custom/templates/
```

If this setting is not set, or the files named below are not found within the directory,

@@ -9,28 +9,222 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.

This documentation provides two TURN server configuration examples:

* [coturn](setup/turn/coturn.md)
* [eturnal](setup/turn/eturnal.md)
The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.

## Requirements

For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.

Hosting TURN behind NAT requires port forwarding and a NAT gateway with a public IP.
However, even with appropriate configuration, NAT is known to cause issues and often fails to work.

Afterwards, the homeserver needs some further configuration.
## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian installation

Just install the debian package:

```sh
apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure coturn to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, coturn can be configured to write to a
   logfile - check the example config file supplied with coturn.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP relay).

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure coturn to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure coturn to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     systemctl restart coturn
     ```

   * If you installed from source:

     ```sh
     bin/turnserver -o
     ```

## Synapse setup

Your homeserver configuration file needs the following extra keys:

1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
   for your TURN server to be given out to your clients. Add separate
   entries for each transport your TURN server supports.
2. "`turn_shared_secret`": This is the secret shared between your
   homeserver and your TURN server, so you should set it to the same
   string you used in turnserver.conf.
3. "`turn_user_lifetime`": This is the amount of time credentials
   generated by your homeserver are valid for (in milliseconds).
   Shorter times offer less potential for abuse at the expense of
   increased traffic between web clients and your homeserver to
   refresh credentials. The TURN REST API specification recommends
   one day (86400000).
4. "`turn_allow_guests`": Whether to allow guest users to use the
   TURN server. This is enabled by default, as otherwise VoIP will
   not work reliably for guests. However, it does introduce a
   security risk as it lets guests connect to arbitrary endpoints
   without having gone through a CAPTCHA or similar to register a
   real account.

As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -38,7 +232,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
turn_user_lifetime: 86400000
turn_allow_guests: true
turn_allow_guests: True

After updating the homeserver configuration, you must restart synapse:

@@ -69,7 +263,7 @@ Here are a few things to try:
 * Check that you have opened your firewall to allow UDP traffic to the UDP
   relay ports (49152-65535 by default).

 * Try disabling TLS/DTLS listeners and enable only its (unencrypted)
 * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
   TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
   voice & video WebRTC traffic is always encrypted.)

@@ -94,19 +288,12 @@ Here are a few things to try:

 * ensure that your TURN server uses the NAT gateway as its default route.

 * Enable more verbose logging, in `coturn` via the `verbose` setting:
 * Enable more verbose logging in coturn via the `verbose` setting:

   ```
   verbose
   ```

   or with `eturnal` with the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):

   ```yaml
   ## Logging configuration:
   log_level: debug
   ```

   ... and then see if there are any clues in its logs.

 * If you are using a browser-based client under Chrome, check
@@ -130,7 +317,7 @@ Here are a few things to try:
   matrix client to your homeserver in your browser's network inspector. In
   the response you should see `username` and `password`. Or:

   * Use the following shell commands for `coturn`:
   * Use the following shell commands:

     ```sh
     secret=staticAuthSecretHere
@@ -140,16 +327,11 @@ Here are a few things to try:
     echo -e "username: $u\npassword: $p"
     ```

   or for `eturnal`
   Or:

     ```sh
     eturnalctl credentials
     ```


 * Or (**coturn only**): Temporarily configure `coturn` to accept a static
   username/password. To do this, comment out `use-auth-secret` and
   `static-auth-secret` and add the following:
   * Temporarily configure coturn to accept a static username/password. To do
     this, comment out `use-auth-secret` and `static-auth-secret` and add the
     following:

   ```
   lt-cred-mech

docs/upgrade.md
@@ -15,8 +15,9 @@ this document.
The website <https://endoflife.date> also offers convenient
summaries.

- If Synapse was installed using [prebuilt packages](setup/installation.md#prebuilt-packages),
  you will need to follow the normal process for upgrading those packages.
- If Synapse was installed using [prebuilt
  packages](setup/installation.md#prebuilt-packages), you will need to follow the
  normal process for upgrading those packages.

- If Synapse was installed using pip then upgrade to the latest
  version by running:
@@ -88,268 +89,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.74.0

## Unicode support in user search

This version introduces optional support for an [improved user search dealing with Unicode characters](https://github.com/matrix-org/synapse/pull/14464).

If you want to take advantage of this feature you need to install PyICU,
the ICU native dependency and its development headers,
so that PyICU can build, since no prebuilt wheels are available.

You can follow [the PyICU documentation](https://pypi.org/project/PyICU/) to do so,
and then do `pip install matrix-synapse[icu]` for a PyPI install.
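
On a Debian-based system that might look like the following sketch (the package names are an assumption for Debian; other distributions name the ICU development files differently):

```sh
# Install ICU headers and pkg-config so PyICU can compile, then install Synapse with ICU support
sudo apt install pkg-config libicu-dev
pip install "matrix-synapse[icu]"
```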

Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.

# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.

This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.

If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.


# Upgrading to v1.72.0

## Dropping support for PostgreSQL 10

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 10, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 11+.


# Upgrading to v1.71.0

## Removal of the `generate_short_term_login_token` module API method

As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.

Modules relying on it can instead use the `create_login_token` method.


## Changes to the events received by application services (interest)

To align with spec (changed in
[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
only considers local users to be interesting. In other words, the `users` namespace
regex is only applied against local users of the homeserver.

Please note, this probably doesn't affect the expected behavior of your application
service, since an interesting local user in a room means all messages in the room
(from local or remote users) will still be considered interesting. And matching a room
with the `rooms` or `aliases` namespace regex will still consider all events sent in the
room to be interesting to the application service.

If one of your application service's `users` regex was intending to match a remote user,
this will no longer match as you expect. The behavioral mismatch between matching all
local users and some remote users is why the spec was changed/clarified and this
caveat is no longer supported.


## Legacy Prometheus metric names are now disabled by default

Synapse v1.71.0 disables legacy Prometheus metric names by default.
For administrators that still rely on them and have not yet had chance to update their
uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
it will no longer be possible to re-enable them.

If you do not use metrics or you have already updated your Grafana dashboard(s),
Prometheus console(s) and alerting rule(s), there is no action needed.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).


# Upgrading to v1.69.0

## Changes to the receipts replication streams

Synapse now includes information indicating if a receipt applies to a thread when
replicating it to other workers. This is a forwards- and backwards-incompatible
change: v1.68 and earlier workers cannot process receipts replicated by v1.69 workers,
and vice versa.

Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
replication will resume as normal.


## Deprecation of legacy Prometheus metric names

In current versions of Synapse, some Prometheus metrics are emitted under two different names,
with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
and one of the names being newer but compliant.

Synapse v1.71.0 will turn the old metric names off *by default*.
For administrators that still rely on them and have not had chance to update their
uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
be possible to re-enable them.

The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
in the `contrib` directory in the Synapse repository have been updated to no longer
rely on the legacy names. These can be used on a current version of Synapse
because current versions of Synapse emit both old and new names.

You may need to update your alerting rules or any other rules that depend on
the names of Prometheus metrics.
If you want to test your changes before legacy names are disabled by default,
you may specify `enable_legacy_metrics: false` in your homeserver configuration.

A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).


## Deprecation of the `generate_short_term_login_token` module API method

The following method of the module API has been deprecated, and is scheduled to
be removed in v1.71.0:

```python
def generate_short_term_login_token(
    self,
    user_id: str,
    duration_in_ms: int = (2 * 60 * 1000),
    auth_provider_id: str = "",
    auth_provider_session_id: Optional[str] = None,
) -> str:
    ...
```

It has been replaced by an asynchronous equivalent:

```python
async def create_login_token(
    self,
    user_id: str,
    duration_in_ms: int = (2 * 60 * 1000),
    auth_provider_id: Optional[str] = None,
    auth_provider_session_id: Optional[str] = None,
) -> str:
    ...
```

Synapse will log a warning when a module uses the deprecated method, to help
administrators find modules using it.


# Upgrading to v1.68.0

Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.

## SQLite version requirement

Synapse now requires a SQLite version of 3.27.0 or higher if SQLite is configured as
Synapse's database.

Installations using

- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
- a PostgreSQL database

are not affected.

## Rust requirement when building from source.

Building from a source checkout of Synapse now requires a recent Rust compiler
(currently Rust 1.58.1, but see also the
[Platform Dependency Policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html)).

Installations using

- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
- PyPI wheels via `pip install matrix-synapse` (on supported platforms and architectures)

will not be affected.

# Upgrading to v1.67.0

## Direct TCP replication is no longer supported: migrate to Redis

Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which were deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and is a prerequisite for upcoming
performance improvements.

To migrate to Redis add the [`redis` config](./workers.md#shared-configuration),
and remove the TCP `replication` listener from config of the master and
`worker_replication_port` from worker config. Note that an HTTP listener with a
`replication` resource is still required.

## Minimum version of Poetry is now v1.2.0

The minimum supported version of poetry is now 1.2. This should only affect
those installing from a source checkout.
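
A quick way to check what you have before upgrading:

```sh
# Verify the installed Poetry version is at least 1.2.0
poetry --version
```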

## Rust requirement in the next release

From the next major release (v1.68.0) installing Synapse from a source checkout
will require a recent Rust compiler. Those using packages or
`pip install matrix-synapse` will not be affected.

The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/).

## SQLite version requirement in the next release

From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.

Those using Docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:

```shell
python -c "import sqlite3; print(sqlite3.sqlite_version)"
```

If this is too old, refer to your distribution for advice on upgrading.


# Upgrading to v1.66.0

## Delegation of email validation no longer supported

As of this version, Synapse no longer allows the tasks of verifying email address
ownership, and password reset confirmation, to be delegated to an identity server.
This removal was previously planned for Synapse 1.64.0, but was
[delayed](https://github.com/matrix-org/synapse/issues/13421) until now to give
homeserver administrators more notice of the change.

To continue to allow users to add email addresses to their homeserver accounts,
and perform password resets, make sure that Synapse is configured with a working
email server in the [`email` configuration
section](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
(including, at a minimum, a `notif_from` setting.)

Specifying an `email` setting under `account_threepid_delegates` will now cause
an error at startup.

# Upgrading to v1.64.0

## Deprecation of the ability to delegate e-mail verification to identity servers
@@ -1442,7 +1181,7 @@ updated.
When setting up worker processes, we now recommend the use of a Redis
server for replication. **The old direct TCP connection method is
deprecated and will be removed in a future release.** See
the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details.
[workers](workers.md) for more details.

# Upgrading to v1.14.0


@@ -5,9 +5,8 @@
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)

An existing user can be marked as a server admin by updating the database directly.
A user can be marked as a server admin by updating the database directly, e.g.:

Check your [database settings](config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator.
```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```
@@ -19,7 +18,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

## Making an Admin API request
For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.
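
For instance, a sanity check against the server version endpoint, assuming Synapse listens on `localhost:8008`:

```sh
# Query the server version via the Admin API from the Synapse host
curl --header "Authorization: Bearer <access_token>" \
    http://localhost:8008/_synapse/admin/v1/server_version
```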

@@ -2,11 +2,11 @@

This API allows you to manage tokens which can be used to authenticate
registration requests, as proposed in
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
and stabilised in version 1.2 of the Matrix specification.
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
To use it, you will need to enable the `registration_requires_token` config
option, and authenticate by providing an `access_token` for a server admin:
see [Admin API](../admin_api).
see [Admin API](../../usage/administration/admin_api).
Note that this API is still experimental; not all clients may support it yet.
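
As an illustration (treat the exact path as an assumption and check the API reference for your Synapse version):

```sh
# List all registration tokens on the server
curl --header "Authorization: Bearer <access_token>" \
    http://localhost:8008/_synapse/admin/v1/registration_tokens
```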


## Registration token objects

@@ -2,9 +2,9 @@

How do I become a server admin?
---
If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins.
If your server already has an admin account you should use the user admin API to promote other accounts to become admins. See [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not)

If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account: use the admin APIs to make further changes.
If you don't have any admin accounts yet you won't be able to use the admin API so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account, use the admin APIs to make further changes.

```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
@@ -32,11 +32,9 @@ What users are registered on my server?
SELECT NAME from users;
```

Manually resetting passwords
Manually resetting passwords:
---
Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the [admin API](../../admin_api/user_admin_api.md#reset-password).

See https://github.com/matrix-org/synapse/blob/master/README.rst#password-reset

I have a problem with my server. Can I just delete my database and start again?
---
@@ -79,7 +77,7 @@ Here we can see that the request has been tagged with `GET-37`. (The tag depends
grep 'GET-37' homeserver.log
```

If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code).


What do all those fields in the 'Processed' line mean?
@@ -103,83 +101,3 @@ LIMIT 10;

You can also use the [List Room API](../../admin_api/rooms.md#list-room-api)
and `order_by` `state_events`.


People can't accept room invitations from me
---

The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs:

2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>

This is normally caused by a misconfiguration in your reverse-proxy. See [the reverse proxy docs](docs/reverse_proxy.md) and double-check that your settings are correct.


Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------

First, ensure you are running the latest version of Synapse, using Python 3
with a [PostgreSQL database](../../postgres.md).

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due
to backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
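On recent Synapse releases the same knob is also exposed in `homeserver.yaml`; a minimal sketch, assuming a version that supports the `caches` section (check the configuration manual for your release):

```yaml
# roughly equivalent to SYNAPSE_CACHE_FACTOR=2.0; the default factor is 0.5
caches:
  global_factor: 2.0
```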

Using [libjemalloc](https://jemalloc.net) can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the `libjemalloc1` package and adding this
line to `/etc/default/matrix-synapse`:

LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see [discussion](https://github.com/matrix-org/synapse/issues/3971)). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by disabling presence
in the Synapse config file: [see here](../configuration/config_documentation.md#presence).
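For reference, the relevant `homeserver.yaml` stanza is tiny (a minimal sketch):

```yaml
# disable presence tracking to work around the excess federation traffic
presence:
  enabled: false
```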


Running out of File Handles
---------------------------

If Synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like `#matrix:matrix.org` containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of March 2019 this hasn't happened yet).

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``.

Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at [`#synapse:matrix.org`](https://matrix.to/#/#synapse-dev:matrix.org) if
you see this failure mode so we can help debug it.

@@ -1,84 +0,0 @@
# Monthly Active Users

Synapse can be configured to record the number of monthly active users (also referred to as MAU) on a given homeserver.
For clarity's sake, MAU only tracks local users.

Please note that the metrics recorded by the [Homeserver Usage Stats](../../usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
are calculated differently. The `monthly_active_users` from the usage stats does not take into account any
of the rules below, and counts any users who have made a request to the homeserver in the last 30 days.

See the [configuration manual](../../usage/configuration/config_documentation.md#limit_usage_by_mau) for details on how to configure MAU.

## Calculating active users

Individual user activity is measured in active days. If a user performs an action, the exact time of that action is then recorded. When
calculating the MAU figure, any users with a recorded action in the last 30 days are considered part of the cohort. Days are measured
as a rolling window from the current system time to 30 days ago.

So for example, if Synapse were to calculate the active users on the 15th July at 13:25, it would include any activity from 15th June 13:25 onwards.

A user is **never** considered active if they are either:
- Part of the trial day cohort (described below)
- Owned by an application service.
  - Note: This **only** covers users that are part of an application service `namespaces.users` registration. The namespace
    must also be marked as `exclusive`.

Otherwise, any request to Synapse will mark the user as active. Please note that registration will not mark a user as active *unless*
they register with a 3pid that is included in the config field `mau_limits_reserved_threepids`.

The Prometheus metric for MAU is refreshed every 5 minutes.

Once an hour, Synapse checks to see if any users are inactive (with only activity timestamps older than 30 days). These users
are removed from the active users cohort. If they then become active, they are immediately restored to the cohort.

It is important to note that **deactivated** users are not immediately removed from the pool of active users, but as these users won't
perform actions they will eventually be removed from the cohort.

### Trial days

If the config option `mau_trial_days` is set, a user must have been active this many days **after** registration to be counted as active. A user is in the
trial period if their registration timestamp (also known as the `creation_ts`) is less than `mau_trial_days` old.

As an example, if `mau_trial_days` is set to `3` and a user is active **after** 3 days (72 hours from registration time) then they will be counted as active.

The `mau_appservice_trial_days` config further extends this rule by applying different durations depending on the `appservice_id` of the user.
Users registered by an application service will be recorded with an `appservice_id` matching the `id` key in the registration file for that service.
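A sketch of what this might look like in `homeserver.yaml` (the appservice IDs here are hypothetical):

```yaml
# per-appservice trial periods, keyed by the `id` from each appservice's
# registration file; values are trial lengths in days
mau_appservice_trial_days:
  my_bridge: 7
  another_bridge: 3
```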


## Limiting usage of the homeserver when the maximum MAU is reached

If both config options `limit_usage_by_mau` and `max_mau_value` are set, and the current MAU value exceeds the maximum value, the
homeserver will begin to block some actions.
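Putting the two options together, a minimal `homeserver.yaml` sketch (the cap of 500 is illustrative):

```yaml
# block some actions once more than 500 users are active in a month
limit_usage_by_mau: true
max_mau_value: 500
```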

Individual users matching **any** of the below criteria never have their actions blocked:
- Considered part of the cohort of MAU users.
- Considered part of the trial period.
- Registered as a `support` user.
- Application service users if `track_appservice_user_ips` is NOT set.

Please note that server admins are **not** exempt from blocking.

The following actions are blocked when the MAU limit is exceeded:
- Logging in
- Sending events
- Creating rooms
- Syncing

Registration is also blocked for all new signups *unless* the user is registering with a threepid included in the `mau_limits_reserved_threepids`
config value.
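A sketch of such a reservation (the address is a hypothetical example):

```yaml
# users registering with these 3PIDs bypass the MAU cap
mau_limits_reserved_threepids:
  - medium: email
    address: admin@example.com
```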

When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMIT_EXCEEDED`.

## Metrics

Synapse records several different Prometheus metrics for MAU.

`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.

`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.

`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.

`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
@@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default lo

| Part | Explanation |
| ----- | ------------ |
| AAAA | Timestamp request was logged (not received) |
| AAAA | Timestamp request was logged (not recieved) |
| BBBB | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
| CCCC | Line number in code |
| DDDD | Log Level |
| EEEE | Request Identifier (This identifier is shared by related log lines)|
| FFFF | Source IP (Or X-Forwarded-For if enabled) |
| GGGG | Server Port |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied).<br/>If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied) |
| IIII | Total Time to process the request |
| JJJJ | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)|
| KKKK | Userland CPU time |

File diff suppressed because it is too large
@@ -1 +1 @@
window.SYNAPSE_VERSION = 'v1.74';
window.SYNAPSE_VERSION = 'v1.64';

docs/workers.md (227 changed lines)
@@ -32,8 +32,13 @@ stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.

All the workers and the main process connect to Redis, which relays replication
commands between processes.
Redis support was added in v1.13.0 with it becoming the recommended method in
v1.18.0. It replaced the old direct TCP connections (which are deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and will be a prerequisite for upcoming
performance improvements.

If Redis support is enabled Synapse will use it as a shared cache, as well as a
pub/sub mechanism.
@@ -88,12 +93,11 @@ shared configuration file.
### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. Optionally, a shared secret can be used to authenticate HTTP
traffic between workers. For example:


```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -113,30 +117,23 @@ redis:
  enabled: true
```

See the [configuration manual](usage/configuration/config_documentation.md)
for the full documentation of each option.
See the sample config for the full documentation of each option.

Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:

* always unencrypted
* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
  is configured
public internet; it has no authentication and is unencrypted.


### Worker configuration

In the config file for each worker, you must specify:
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
  The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
In the config file for each worker, you must specify the type of worker
application (`worker_app`), and you should specify a unique name for the worker
(`worker_name`). The currently available worker applications are listed below.
You must also specify the HTTP replication endpoint that it should talk to on
the main synapse process. `worker_replication_host` should specify the host of
the main synapse and `worker_replication_http_port` should point to the HTTP
replication port. If the worker will handle HTTP requests then the
`worker_listeners` option should be set with an `http` listener, in the same way
as the `listeners` option in the shared config.

For example:
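(The referenced example file is not shown in this diff view. A minimal sketch of such a worker config, using only the options named above — the worker name and port numbers are illustrative:)

```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# connect to the main process for HTTP replication
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# serve client and federation endpoints on port 8083
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```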

@@ -151,6 +148,7 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).


### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either
@@ -191,7 +189,6 @@ information.
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/key/v2/query
@@ -208,8 +205,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
@@ -219,7 +214,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$

# Encryption requests
@@ -227,7 +221,6 @@ information.
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
^/_matrix/client/(r0|v3|unstable)/room_keys/
^/_matrix/client/(r0|v3|unstable)/keys/upload/

# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
@@ -292,10 +285,8 @@ For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
with `client` and `federation` `resources` must be configured in the
[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
option in the worker config.
Note that a HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.

#### Load balancing

@@ -306,11 +297,9 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. This can
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;` which contains the user's access token.

Admins may additionally wish to separate out `/sync`
requests from a particular user are routed to a single instance. Extracting a
user ID from the access token or `Authorization` header is currently left as an
exercise for the reader. Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests that don't are known as "initial sync" requests, which happen
when a user logs in on a new device and can be *very* resource intensive, so
@@ -336,13 +325,12 @@ effects of bursts of events from that bridge on events sent by normal users.

Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
(This is only supported with Redis-based replication.)

To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config. The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.
To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
can handle multiple streams, but unless otherwise documented, each stream can only
have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
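(The snippet itself is cut from this view; a minimal sketch, with an illustrative worker name:)

```yaml
stream_writers:
  events: event_persister1
```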
@@ -369,26 +357,9 @@ streams and the endpoints associated with them:

##### The `events` stream

The `events` stream experimentally supports having multiple writer workers, where load
is sharded between them by room ID. Each writer is called an _event persister_. They are
responsible for
- receiving new events,
- linking them to those already in the room [DAG](development/room-dag-concepts.md),
- persisting them to the DB, and finally
- updating the events stream.

Because load is sharded in this way, you *must* restart all worker instances when
adding or removing event persisters.

An `event_persister` should not be mistaken for an `event_creator`.
An `event_creator` listens for requests from clients to create new events and does
so. It will then pass those events over HTTP replication to any configured event
persisters (or the main process if none are configured).

Note that `event_creator`s and `event_persister`s are implemented using the same
[`synapse.app.generic_worker`](#synapse.app.generic_worker).

An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
The `events` stream experimentally supports having multiple writers, where work
is sharded between them by room ID. Note that you *must* restart all worker
instances when adding or removing event persisters. An example `stream_writers`
configuration with multiple writers:

```yaml
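# The body of this example was cut from the diff view. What follows is a
# sketch with illustrative worker names, assuming the list form that the
# surrounding text describes for multiple event persisters.
stream_writers:
  events:
    - event_persister1
    - event_persister2
```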

@@ -440,20 +411,18 @@ the stream writer for the `presence` stream:
There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). This worker doesn't handle any REST endpoints itself.
stats is enabled).

To enable this, the worker must have a unique
[`worker_name`](usage/configuration/config_documentation.md#worker_name)
and can be configured to run background tasks. For example, to move background tasks
to a dedicated worker, the shared configuration would include:
To enable this, the worker must have a `worker_name` and can be configured to run
background tasks. For example, to move background tasks to a dedicated worker,
the shared configuration would include:

```yaml
run_background_tasks_on: background_worker
```

You might also wish to investigate the
[`update_user_directory_from_worker`](#updating-the-user-directory) and
[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.
You might also wish to investigate the `update_user_directory_from_worker` and
`media_instance_running_background_jobs` settings.

An example for a dedicated background worker instance:

@@ -489,8 +458,8 @@ worker application type.
#### Notifying Application Services

You can designate one generic worker to send output traffic to Application Services.
It doesn't handle any REST endpoints itself, but you should specify its name in the
shared configuration as follows:

Specify its name in the shared configuration as follows:

```yaml
notify_appservices_from_worker: worker_name
@@ -505,32 +474,19 @@ worker application type.

### `synapse.app.pusher`

It is likely this option will be deprecated in the future and is not recommended for new
installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending push notifications.

To run multiple instances at once the
[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
option should list all pusher instances by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:
To run multiple instances at once the `pusher_instances` option should list all
pusher instances by their worker name, e.g.:

```yaml
start_pushers: false
pusher_instances:
  - pusher_worker1
  - pusher_worker2
```

An example for a pusher instance:

```yaml
{{#include systemd-with-workers/workers/pusher_worker.yaml}}
```


### `synapse.app.appservice`

@@ -546,35 +502,21 @@ Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.federation_sender`

It is likely this option will be deprecated in the future and is not recommended for
new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
in the shared configuration file to stop the main synapse sending this traffic.
REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.

If running multiple federation senders then you must list each
instance in the
[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
option by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name).
instance in the `federation_sender_instances` option by their `worker_name`.
All instances must be stopped and started when adding or removing instances.
For example:

```yaml
send_federation: false
federation_sender_instances:
  - federation_sender1
  - federation_sender2
```

An example for a federation sender instance:

```yaml
{{#include systemd-with-workers/workers/federation_sender.yaml}}
```

### `synapse.app.media_repository`

Handles the media repository. It can handle all endpoints starting with:
@@ -590,19 +532,21 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$

You should also set
[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
in the shared configuration
You should also set `enable_media_repo: False` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.

In the `media_repository` worker configuration file, configure the
[HTTP listener](usage/configuration/config_documentation.md#listeners) to
In the `media_repository` worker configuration file, configure the http listener to
expose the `media` resource. For example:

```yaml
{{#include systemd-with-workers/workers/media_worker.yaml}}
worker_listeners:
  - type: http
    port: 8085
    resources:
      - names:
          - media
```

Note that if running multiple media repositories they must be on the same server
@@ -637,25 +581,52 @@ handle it, and are online.

If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.

### `synapse.app.frontend_proxy`

Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions:

^/_matrix/client/(r0|v3|unstable)/keys/upload

If `use_presence` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions:

^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status

This "stub" presence handler will pass through `GET` request but make the
`PUT` effectively a no-op.

It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
file. For example:

```yaml
worker_main_http_uri: http://127.0.0.1:8008
```

### Historical apps

The following used to be separate worker application types, but are now
equivalent to `synapse.app.generic_worker`:

* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
* `synapse.app.pusher`
* `synapse.app.synchrotron`
*Note:* Historically there used to be more apps, however they have been
amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
are ones that do specific processing unrelated to requests, e.g. the `pusher`
that handles sending out push notifications for new events. The intention is for
all these to be folded into the `generic_worker` app and to use config to define
which processes handle the various processing such as push notifications.


## Migration from old config

A main change that has occurred is the merging of worker apps into
`synapse.app.generic_worker`. This change is backwards compatible and so no
changes to the config are required.
There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible and so no changes to the config are required, however
server admins are encouraged to plan to migrate to Redis as the old style direct
TCP replication config is deprecated.

To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.

To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where workers are started (e.g.

mypy.ini (64 changed lines)
@@ -1,6 +1,6 @@
[mypy]
namespace_packages = True
plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
@@ -11,15 +11,12 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True
warn_redundant_casts = True

files =
  docker/,
  scripts-dev/,
  synapse/,
  tests/,
  build_rust.py
  tests/

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
@@ -58,8 +55,24 @@ exclude = (?x)
  |tests/rest/media/v1/test_media_storage.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_metrics.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py
  |tests/util/caches/test_deferred_cache.py
  |tests/util/caches/test_descriptors.py
  |tests/util/caches/test_response_cache.py
  |tests/util/caches/test_ttlcache.py
  |tests/util/test_async_helpers.py
  |tests/util/test_batching_queue.py
  |tests/util/test_dict_cache.py
  |tests/util/test_expiring_cache.py
  |tests/util/test_file_consumer.py
  |tests/util/test_linearizer.py
  |tests/util/test_logcontext.py
  |tests/util/test_lrucache.py
  |tests/util/test_rwlock.py
  |tests/util/test_wheel_timer.py
  )$

[mypy-synapse.federation.transport.client]
@@ -89,51 +102,31 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False

[mypy-tests.config.test_api]
disallow_untyped_defs = True

[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True

[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.metrics.test_background_process_metrics]
disallow_untyped_defs = True

[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True

[mypy-tests.rest.*]
disallow_untyped_defs = True

[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.*]
disallow_untyped_defs = True

[mypy-tests.test_server]
disallow_untyped_defs = True

[mypy-tests.types.*]
[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.util.caches.*]
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True

[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.util.*]
[mypy-tests.rest.*]
disallow_untyped_defs = True

[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.utils]
disallow_untyped_defs = True


;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
;; The `typeshed` project maintains stubs here:
@@ -188,6 +181,3 @@ ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

[mypy-setuptools_rust.*]
ignore_missing_imports = True

poetry.lock (generated, 1271 changed lines)
File diff suppressed because it is too large
@@ -52,12 +52,9 @@ include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true

[tool.maturin]
manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.74.0"
version = "1.64.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -85,17 +82,7 @@ include = [
    { path = "sytest-blacklist", format = "sdist" },
    { path = "tests", format = "sdist" },
    { path = "UPGRADE.rst", format = "sdist" },
    { path = "Cargo.toml", format = "sdist" },
    { path = "Cargo.lock", format = "sdist" },
    { path = "rust/Cargo.toml", format = "sdist" },
    { path = "rust/build.rs", format = "sdist" },
    { path = "rust/src/**", format = "sdist" },
]
exclude = [
    { path = "synapse/*.so", format = "sdist"}
]

build = "build_rust.py"

[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
@@ -139,10 +126,9 @@ pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
bcrypt = ">=3.1.0"
Pillow = ">=5.4.0"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
sortedcontainers = ">=1.5.2"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
@@ -166,24 +152,12 @@ typing-extensions = ">=3.10.0.1"
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "^1.3.0"
matrix-common = "^1.2.1"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic with available on common distros.
pydantic = ">=1.7.4"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --no-dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
setuptools_rust = ">=1.3"


# Optional Dependencies
@@ -193,7 +167,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.15.1", optional = true }
authlib = { version = ">=0.14.0", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -208,7 +182,6 @@ hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
pyicu = { version = ">=2.10.2", optional = true }

[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
@@ -221,7 +194,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url-preview = ["lxml"]
url_preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
@@ -229,12 +202,8 @@ jwt = ["authlib"]
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
cache_memory = ["pympler"]
test = ["parameterized", "idna"]
# Allows for better search for international characters in the user directory. This
# requires libicu's development headers installed on the system (e.g. libicu-dev on
# Debian-based distributions).
user-search = ["pyicu"]

# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
@@ -256,7 +225,7 @@ all = [
    "pysaml2",
    # oidc and jwt
    "authlib",
    # url-preview
    # url_preview
    "lxml",
    # sentry
    "sentry-sdk",
@@ -264,10 +233,8 @@ all = [
    "jaeger-client", "opentracing",
    # redis
    "txredisapi", "hiredis",
    # cache-memory
    # cache_memory
    "pympler",
    # improved user search
    "pyicu",
    # omitted:
    # - test: it's useful to have this separate from dev deps in the olddeps job
    # - systemd: this is a system-based requirement
@@ -275,10 +242,10 @@ all = [

[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
isort = "==5.7.0"
black = "==22.3.0"
flake8-comprehensions = "*"
flake8-bugbear = ">=21.3.2"
flake8-bugbear = "==21.3.2"
flake8 = "*"

# Typechecking
@@ -304,46 +271,16 @@ parameterized = ">=0.7.4"
idna = ">=2.5"

# The following are used by the release script
click = ">=8.1.3"
click = "==8.1.1"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
commonmark = ">=0.9.1"
pygithub = ">=1.55"
commonmark = "==0.9.1"
pygithub = "==1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"

[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"


[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }

# For some reason if we don't manually clean the build directory we
# can end up polluting the next build with a .so that is for the wrong
# Python version.
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"


[tool.cibuildwheel.linux]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"

[tool.cibuildwheel.macos]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"

Some files were not shown because too many files have changed in this diff