
Compare commits

..

4 Commits

Author                 SHA1        Message                                                                                            Date
Paul "LeoNerd" Evans   01e83c9680  Merge branch 'develop' into paul/schema_breaking_changes                                          2014-09-03 18:21:55 +01:00
Paul "LeoNerd" Evans   9705706a7f  Merge branch 'develop' into paul/schema_breaking_changes                                          2014-09-03 17:40:42 +01:00
Paul "LeoNerd" Evans   801a551da1  Rename 'mtime' DB field to 'last_active', adjusted semantics                                      2014-09-03 17:04:58 +01:00
Paul "LeoNerd" Evans   b8906b0ea8  Kill the 'state' presence key in DB, name it 'presence' instead to match the over-the-wire API    2014-09-03 16:11:34 +01:00
1760 changed files with 70862 additions and 365326 deletions

View File

@@ -1,91 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub Actions does not currently support nested groups, so
we create a stylized header for each package instead.
This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Passing tests are first */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* and failing tests are last */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}

View File

@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

View File

@@ -1,19 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the PostgreSQL database that will be available in the
# CI's Docker setup by the time this config file is used.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
  name: "psycopg2"
  args:
    user: postgres
    host: localhost
    password: postgres
    database: synapse
# Suppress the key server warning.
trusted_key_servers: []

View File

@@ -1,132 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel; if so, rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an ABI3 compatible wheel"""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect a wheel file with only a single tag.
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse.
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macOS and need to use
        # `delocate-listdeps`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
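Given the argparse definition above, a hypothetical invocation of this wrapper might look like the following (the script path, wheel filename and version are illustrative, not taken from the repository):

# Linux: tag the wheel as abi3 (if eligible) and repair it with auditwheel.
python auditwheel_wrap.py -w ./wheelhouse matrix_synapse-1.70.0-cp37-cp37m-manylinux_2_17_x86_64.whl

# macOS: passing --require-archs switches the repair step to the delocate tools.
python auditwheel_wrap.py -w ./wheelhouse --require-archs x86_64 matrix_synapse-1.70.0-cp37-cp37m-macosx_10_15_x86_64.whl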

View File

@@ -1,135 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calculate the trial jobs to run based on whether we're in a PR or not.

import json
import os


def set_output(key: str, value: str):
    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
        print(f"{key}={value}", file=f)


IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# On PRs we only run each type of test on Python 3.7.

trial_sqlite_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "all",
    }
]

if not IS_PR:
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10", "3.11")
    )

trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "11",
        "extras": "all",
    }
]

if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.11",
            "database": "postgres",
            "postgres-version": "15",
            "extras": "all",
        }
    )

trial_no_extra_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "",
    }
]

print("::group::Calculated trial jobs")
print(
    json.dumps(
        trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
    )
)
print("::endgroup::")

test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)

# Next calculate the various sytest jobs.
#
# On PRs we only run each type of test on focal.

sytest_tests = [
    {
        "sytest-tag": "focal",
    },
    {
        "sytest-tag": "focal",
        "postgres": "postgres",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
            },
            {
                "sytest-tag": "buster",
                "postgres": "multi-postgres",
                "workers": "workers",
            },
        ]
    )

print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)

View File

@@ -1,25 +0,0 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.
set -e
mkdir -p complement
# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done

View File

@@ -1,21 +0,0 @@
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.
set -o pipefail
set -e
# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages
# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"
# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
set -ex
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.2.0
poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"

View File

@@ -1,36 +0,0 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock
block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock
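A note on the `block`/`endblock` aliases defined at the top of this script: because the POSIX shell used here expands aliases inside scripts, a line such as `block Set Go Version` behaves roughly like the following (an illustrative expansion, not literal script content):

{ set +x; } 2>/dev/null                        # silence command tracing while we set up
func() { echo "::group::$*"; set -x; }         # (re)define a helper that opens a GitHub Actions log group
func Set Go Version                            # prints "::group::Set Go Version" and turns tracing on

`endblock` works the same way, printing "::endgroup::" instead.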

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
exit 1
fi
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."
exit 1
fi

View File

@@ -1,67 +0,0 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe -o pipefail
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
# Now do the same again, on an empty database.
echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff

View File

@@ -1,16 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
  name: "sqlite3"
  args:
    database: ".ci/test_db.db"
# Suppress the key server warning.
trusted_key_servers: []

Binary file not shown.

View File

@@ -1,4 +0,0 @@
---
title: CI run against Twisted trunk is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

View File

@@ -1,2 +0,0 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.

View File

@@ -1,14 +0,0 @@
comment: off
coverage:
  status:
    project:
      default:
        target: 0  # Target % coverage, can be auto. Turned off for now
        threshold: null
        base: auto
    patch:
      default:
        target: 0
        threshold: null
        base: auto

View File

@@ -1,8 +0,0 @@
[run]
branch = True
parallel = True
include=$TOP/synapse/*
data_file = $TOP/.coverage
[report]
precision = 2

View File

@@ -1,18 +0,0 @@
# ignore everything by default
*
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py
rust/target
synapse/*.so
**/__pycache__

View File

@@ -1,10 +0,0 @@
# EditorConfig https://EditorConfig.org
# top-most EditorConfig file
root = true
# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88

18
.flake8
View File

@@ -1,18 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
#
# flake8-bugbear runs extra checks. Its error codes are described at
# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
# B023: Functions defined inside a loop must not use variables redefined in the loop
# B024: Abstract base class with no abstract method.
ignore=W503,W504,E203,E731,E501,B019,B023,B024

View File

@@ -1,24 +0,0 @@
# Commits in this file will be removed from GitHub blame results.
#
# To use this file locally, use:
# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" <files>
#
# or configure the `blame.ignoreRevsFile` option in your git config.
#
# If ignoring a pull request that was not squash merged, only the merge
# commit needs to be put here. Child commits will be resolved from it.
# Run black (#3679).
8b3d9b6b199abb87246f982d5db356f1966db925
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
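For the second option mentioned in the header comment, the relevant (standard) git setting can be applied once per clone like so:

git config blame.ignoreRevsFile .git-blame-ignore-revs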

2
.github/CODEOWNERS vendored
View File

@@ -1,2 +0,0 @@
# Automatically request reviews from the synapse-core team when a pull request comes in.
* @matrix-org/synapse-core

4
.github/FUNDING.yml vendored
View File

@@ -1,4 +0,0 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg

View File

@@ -1,5 +0,0 @@
**If you are looking for support** please ask in **#synapse:matrix.org**
(using a matrix.org account if necessary). We do not use GitHub issues for
support.
**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/

View File

@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
- type: markdown
attributes:
value: |
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report form. By following the instructions below and completing the sections with your information, you will help us get all the necessary data to fix your issue.
You can also preview your report before submitting it.
- type: textarea
id: description
attributes:
label: Description
description: Describe the problem that you are experiencing
validations:
required: true
- type: textarea
id: reproduction_steps
attributes:
label: Steps to reproduce
description: |
Describe the series of steps that leads you to the problem.
Describe how what happens differs from what you expected.
placeholder: Tell us what you see!
value: |
- list the steps
- that reproduce the bug
- using hyphens as bullet points
validations:
required: true
- type: markdown
attributes:
value: |
---
**IMPORTANT**: please answer the following questions, to help us narrow down the problem.
- type: input
id: homeserver
attributes:
label: Homeserver
description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
validations:
required: true
- type: input
id: version
attributes:
label: Synapse Version
description: |
What version of Synapse is this homeserver running?
You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version
or with this command:
```
$ curl http://localhost:8008/_synapse/admin/v1/server_version
```
(You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
validations:
required: true
- type: dropdown
id: install_method
attributes:
label: Installation Method
options:
- Docker (matrixdotorg/synapse)
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
- type: textarea
id: platform
attributes:
label: Platform
description: |
Tell us about the environment in which your homeserver is operating...
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks.
Please be careful to remove any personal or private data.
**Bug reports are usually very difficult to diagnose without logging.**
render: shell
validations:
required: true
- type: textarea
id: anything_else
attributes:
label: Anything else that would be useful to know?

View File

@@ -1,9 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
---
**Description:**
<!-- Describe here the feature you are requesting. -->

View File

@@ -1,10 +0,0 @@
---
name: Support request
about: I need support for Synapse
---
Please don't file github issues asking for support.
Instead, please join [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org)
(from a matrix.org account if necessary), and ask there.

View File

@@ -1,14 +0,0 @@
### Pull Request Checklist
<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
- Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))

3
.github/SUPPORT.md vendored
View File

@@ -1,3 +0,0 @@
[**#synapse:matrix.org**](https://matrix.to/#/#synapse:matrix.org) is the official support room for
Synapse, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html.
Please ask for support there, rather than filing github issues.

View File

@@ -1,23 +0,0 @@
version: 2
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "cargo"
directory: "/"
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"

View File

@@ -1,46 +0,0 @@
name: Write changelog for dependabot PR
on:
pull_request:
types:
- opened
- reopened # For debugging!
permissions:
# Needed to be able to push the commit. See
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
# for a similar example
contents: write
jobs:
add-changelog:
runs-on: 'ubuntu-latest'
if: ${{ github.actor == 'dependabot[bot]' }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
- name: Write, commit and push changelog
run: |
echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
git add changelog.d
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "GitHub Actions"
git commit -m "Changelog"
git push
shell: bash
# The `git push` above does not trigger CI on the dependabot PR.
#
# By default, workflows can't trigger other workflows when they're just using the
# default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
# recursive workflow loops by accident, because that'll get very expensive very
# quickly.) Instead, you have to manually call out to another workflow, or else
# make your changes (i.e. the `git push` above) using a personal access token.
# See
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
#
# I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
# See git commit history for previous attempts. If anyone desperately wants to try
# again in the future, make a matrix-bot account and use its access token to git push.
# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.

View File

@@ -1,62 +0,0 @@
# GitHub actions workflow which builds and publishes the docker images.
name: Build docker images
on:
push:
tags: ["v*"]
branches: [ master, main, develop ]
workflow_dispatch:
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
# arm64 builds OOM without the git fetch setting. c.f.
# https://github.com/rust-lang/cargo/issues/10583
build-args: |
CARGO_NET_GIT_FETCH_WITH_CLI=true

View File

@@ -1,34 +0,0 @@
name: Deploy documentation PR preview
on:
workflow_run:
workflows: [ "Prepare documentation PR preview" ]
types:
- completed
jobs:
netlify:
if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
runs-on: ubuntu-latest
steps:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run event
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
name: book
path: book
- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@v1
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}
branch: ${{ github.event.workflow_run.head_branch }}
revision: ${{ github.event.workflow_run.head_sha }}
token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
site_id: ${{ secrets.NETLIFY_SITE_ID }}
desc: Documentation preview
deployment_env: PR Documentation Preview

View File

@@ -1,34 +0,0 @@
name: Prepare documentation PR preview
on:
pull_request:
paths:
- docs/**
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
uses: actions/upload-artifact@v3
with:
name: book
path: book
# We'll only use this in a workflow_run, then we're done with it
retention-days: 1

View File

@@ -1,65 +0,0 @@
name: Deploy the documentation
on:
push:
branches:
# For bleeding-edge documentation
- develop
# For documentation specific to a release
- 'release-v*'
# stable docs
- master
workflow_dispatch:
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Figure out the target directory.
#
# The target directory depends on the name of the branch
#
- name: Get the target directory name
id: vars
run: |
# first strip the 'refs/heads/' prefix with some shell foo
branch="${GITHUB_REF#refs/heads/}"
case $branch in
release-*)
# strip 'release-' from the name for release branches.
branch="${branch#release-}"
;;
master)
# deploy to "latest" for the master branch.
branch="latest"
;;
esac
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ steps.vars.outputs.branch-version }}

View File

@@ -1,216 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies
on:
schedule:
- cron: 0 7 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
# Upgrade all runtime dependencies only. This is intended to mimic a fresh
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- database: "sqlite"
- database: "postgres"
postgres-version: "14"
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: pip install .[all,test]
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
volumes:
- ${{ github.workspace }}:/src
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis
env:
POSTGRES: ${{ matrix.postgres && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
working-directory: /src
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
# Open an issue if the build fails, so we know about it.
# Only do this if we're not experimenting with this action in a PR.
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
# TODO: should mypy be included here? It feels more brittle than the others.
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md

View File

@@ -1,209 +0,0 @@
# GitHub actions workflow which builds the release artifacts.
name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
get-distros:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "distros=$dists" >> "$GITHUB_OUTPUT"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
# now build the packages with a matrix build.
build-debs:
needs: get-distros
name: "Build .deb packages"
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
# for the cache magic here
run: |
./src/scripts-dev/build_debian_packages.py \
--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
--docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
--docker-build-arg=--progress=plain \
--docker-build-arg=--load \
"${{ matrix.distro }}"
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v3
with:
name: debs
path: debs/*
build-wheels:
name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-11]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-11"
# Don't build aarch64 wheels on mac.
- os: "macos-11"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
arch: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Build aarch64 wheels
if: matrix.arch == 'aarch64'
run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
name: Build sdist
runs-on: ubuntu-latest
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- run: pip install build
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
name: "Attach assets to release"
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}

View File

@@ -1,548 +0,0 @@
name: Tests
on:
push:
branches: ["develop", "release-*"]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
steps:
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
with:
filters: |
rust:
- 'rust/**'
- 'Cargo.toml'
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy
lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- lint-clippy
- lint-rustfmt
runs-on: ubuntu-latest
steps:
- run: "true"
calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: calculate-test-jobs
runs-on: ubuntu-latest
strategy:
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
extras: "all test"
- run: poetry run trial -j2 tests
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
- uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
if: ${{ !failure() && !cancelled() }}
needs: calculate-test-jobs
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
export-data:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: [linting-done, portdb]
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
services:
postgres:
image: postgres
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "11"
- python-version: "3.11"
postgres-version: "15"
services:
postgres:
image: postgres:${{ matrix.postgres-version }}
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
# behind new releases, so we have to use the PostgreSQL apt repository.
# Steps taken from https://www.postgresql.org/download/linux/ubuntu/
run: |
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
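# For local debugging, the run above can be approximated outside CI with
# something along these lines (a sketch, not the canonical invocation: it
# assumes Docker, Go and a separate Complement checkout whose path you point
# COMPLEMENT_DIR at, run from the root of a synapse checkout):
#
#   POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=/path/to/complement ./scripts-dev/complement.sh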
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
- cargo-test
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
with:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non-PR builds
# Cargo test is skipped if there are no changes to the Rust code
skippable: |
lint-newsfile
cargo-test


@@ -1,15 +0,0 @@
name: Move new issues into the issue triage board
on:
issues:
types: [ opened ]
jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}
secrets:
github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}


@@ -1,44 +0,0 @@
name: Move labelled issues to correct projects
on:
issues:
types: [ labeled ]
jobs:
move_needs_info:
name: Move X-Needs-Info on the triage board
runs-on: ubuntu-latest
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: actions/add-to-project@main
id: add_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"
github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
- name: Set status
env:
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
run: |
gh api graphql -f query='
mutation(
$project: ID!
$item: ID!
$fieldid: ID!
$columnid: String!
) {
updateProjectV2ItemFieldValue(
input: {
projectId: $project
itemId: $item
fieldId: $fieldid
value: {
singleSelectOptionId: $columnid
}
}
) {
projectV2Item {
id
}
}
}' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent


@@ -1,182 +0,0 @@
name: Twisted Trunk
on:
schedule:
- cron: 0 8 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
- name: Dump logs
# Logs are most useful when the command fails; always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:buster
volumes:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
working-directory: synapse
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/twisted_trunk_build_failed_issue_template.md

.gitignore vendored

@@ -1,70 +1,26 @@
# filename patterns
*~
*.pyc
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock
*.py[cod]
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
# We do want the poetry and cargo lockfile.
!poetry.lock
!Cargo.lock
cmdclient_config.json
homeserver*.db
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/
/uploads
.coverage
htmlcov
# For direnv users
/.envrc
demo/*.db
demo/*.log
demo/*.pid
# IDEs
/.idea/
/.ropeproject/
/.vscode/
graph/*.svg
graph/*.png
graph/*.dot
# build products
!/.coveragerc
/.coverage*
/.mypy_cache/
/.tox
/.tox-pg-container
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/
# docs
book/
# complement
/complement-*
/master.tar.gz
# rust
/target/
/synapse/*.so
# Poetry will create a setup.py, which we don't want to include.
/setup.py
uploads


@@ -1 +0,0 @@
group_imports = "StdExternalCrate"


@@ -1,51 +0,0 @@
The following is an incomplete list of people outside the core team who have
contributed to Synapse. It is no longer maintained: more recent contributions
are listed in the `changelog <CHANGES.md>`_.
----
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
Brabo <brabo at riseup.net>
* Installation instruction fixes
Ivan Shapovalov <intelfx100 at gmail.com>
* contrib/systemd: a sample systemd unit file and a logger configuration
Eric Myhre <hash at exultant.us>
* Fix bug where ``media_store_path`` config option was ignored by v0 content
repository API.
Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
* Add SAML2 support for registration and login.
Steven Hammerton <steven.hammerton at openmarket.com>
* Add CAS support for registration and login.
Mads Robin Christensen <mads at v42 dot dk>
* CentOS 7 installation instructions.
Florent Violleau <floviolleau at gmail dot com>
* Add Raspberry Pi installation instructions and general troubleshooting items
Niklas Riekenbrauck <nikriek at gmail dot.com>
* Add JWT support for registration and login
Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix
Joseph Weston <joseph at weston.cloud>
* Add admin API for querying HS version
Benjamin Saunders <ben.e.saunders at gmail dot com>
* Documentation improvements
Werner Sembach <werner.sembach at fau dot de>
* Automatically remove a group/community when it is empty

CHANGES.md

File diff suppressed because it is too large

CHANGES.rst Normal file

@@ -0,0 +1,85 @@
Changes in synapse 0.2.0 (2014-09-02)
=====================================
This update changes many configuration options, updates the
database schema and mandates SSL for server-server connections.
Homeserver:
* Require SSL for server-server connections.
* Add SSL listener for client-server connections.
* Add ability to use config files.
* Add support for kicking/banning and power levels.
* Allow setting of room names and topics on creation.
* Change presence to include last seen time of the user.
* Change url path prefix to /_matrix/...
* Bug fixes to presence.
Webclient:
* Reskin the CSS for registration and login.
* Various improvements to rooms CSS.
* Support changes in client-server API.
* Bug fixes to VOIP UI.
* Various bug fixes to handling of changes to room member list.
Changes in synapse 0.1.2 (2014-08-29)
=====================================
Webclient:
* Add basic call state UI for VoIP calls.
Changes in synapse 0.1.1 (2014-08-29)
=====================================
Homeserver:
* Fix bug that caused the event stream to not notify some clients about
changes.
Changes in synapse 0.1.0 (2014-08-29)
=====================================
Presence has been reenabled in this release.
Homeserver:
* Update client to server API, including:
- Use a more consistent url scheme.
- Provide more useful information in the initial sync api.
* Change the presence handling to be much more efficient.
* Change the presence server to server API to not require explicit polling of
all users who share a room with a user.
* Fix races in the event streaming logic.
Webclient:
* Update to use new client to server API.
* Add basic VOIP support.
* Add idle timers that change your status to away.
* Add recent rooms column when viewing a room.
* Various network efficiency improvements.
* Add basic mobile browser support.
* Add a settings page.
Changes in synapse 0.0.1 (2014-08-22)
=====================================
Presence has been disabled in this release due to a bug that caused the
homeserver to spam other remote homeservers.
Homeserver:
* Completely change the database schema to support generic event types.
* Improve presence reliability.
* Improve reliability of joining remote rooms.
* Fix bug where room join events were duplicated.
* Improve initial sync API to return more information to the client.
* Stop generating fake messages for room membership events.
Webclient:
* Add tab completion of names.
* Add ability to upload and send images.
* Add profile pages.
* Improve CSS layout of room.
* Disambiguate identical display names.
* Don't get remote users display names and avatars individually.
* Use the new initial sync API to reduce number of round trips to the homeserver.
* Change url scheme to use room aliases instead of room ids where known.
* Increase longpoll timeout.
Changes in synapse 0.0.0 (2014-08-13)
=====================================
* Initial alpha release


@@ -1,3 +0,0 @@
# Welcome to Synapse
Please see the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html) in our rendered documentation.

Cargo.lock generated

@@ -1,466 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "proc-macro2"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-log"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
"arc-swap",
"log",
"pyo3",
]
[[package]]
name = "pyo3-macros"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
"pyo3",
"serde",
]
[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
"hex",
"lazy_static",
"log",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
]
[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"


@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).
[workspace]
members = ["rust"]
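Because the whole checkout is one workspace, cargo commands can be issued from
the repository root rather than from rust/. For instance (an illustrative
sketch, mirroring the ``cargo test`` step in the CI config above):

    cargo build
    cargo test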


@@ -1,7 +0,0 @@
# Installation Instructions
This document has moved to the
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
Please update your links.
The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).

MANIFEST.in Normal file

@@ -0,0 +1,3 @@
recursive-include docs *
recursive-include tests *.py
recursive-include synapse/persistence/schema *.sql

MAP.rst Normal file

@@ -0,0 +1,35 @@
Directory Structure
===================
Warning: this may be a bit stale...
::
.
├── cmdclient Basic CLI python Matrix client
├── demo Scripts for running standalone Matrix demos
├── docs All doc, including the draft Matrix API spec
│   ├── client-server The client-server Matrix API spec
│   ├── model Domain-specific elements of the Matrix API spec
│   ├── server-server The server-server model of the Matrix API spec
│   └── sphinx The internal API doc of the Synapse homeserver
├── experiments Early experiments of using Synapse's internal APIs
├── graph Visualisation of Matrix's distributed message store
├── synapse The reference Matrix homeserver implementation
│   ├── api Common building blocks for the APIs
│   │   ├── events Definition of state representation Events
│   │   └── streams Definition of streamable Event objects
│   ├── app The __main__ entry point for the homeserver
│   ├── crypto The PKI client/server used for secure federation
│   │   └── resource PKI helper objects (e.g. keys)
│   ├── federation Server-server state replication logic
│   ├── handlers The main business logic of the homeserver
│   ├── http Wrappers around Twisted's HTTP server & client
│   ├── rest Servlet-style RESTful API
│   ├── storage Persistence subsystem (currently only sqlite3)
│   │   └── schema sqlite persistence schema
│   └── util Synapse-specific utilities
├── tests Unit tests for the Synapse homeserver
└── webclient Basic AngularJS Matrix web client


@@ -1,250 +1,290 @@
=========================================================================
Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================
Introduction
============
Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
maintained by the Matrix.org Foundation. We began rapid development in 2014,
reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
in earnest today.
Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:
Briefly, Matrix is an open standard for communications on the internet, supporting
federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
<https://spec.matrix.org/>`_ describes the technical details.
- Chatrooms are distributed and do not exist on any single server. Rooms
can be found using names like ``#matrix:matrix.org`` or
``#test:localhost:8008`` or they can be ephemeral.
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
you will normally refer to yourself and others using a 3PID: email
address, phone number, etc rather than manipulating Matrix user IDs)
.. contents::
The overall architecture is::
Installing and configuration
============================
client <----> homeserver <=====================> homeserver <----> client
https://matrix.org/_matrix https://mydomain.net/_matrix
The Synapse documentation describes `how to install Synapse <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
<https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.
Quick Start
===========
.. _federation:
To get up and running:
- To simply play with an **existing** homeserver you can
just go straight to http://matrix.org/alpha.
- To run your own **private** homeserver on localhost:8008, install synapse
with ``python setup.py develop --user`` and then run one with
``python synapse/app/homeserver.py`` - you will find a webclient running
at http://localhost:8008 (use a recent Chrome, Safari or Firefox for now,
please...)
- To make the homeserver **public** and let it exchange messages with
other homeservers and participate in the overall Matrix federation, open
up port 8448 and run ``python synapse/app/homeserver.py --host
machine.my.domain.name``. Then come join ``#matrix:matrix.org`` and
say hi! :)
Synapse has a variety of `config options
<https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
which can be used to customise its behaviour after installation.
There are additional details on how to `configure Synapse for federation here
<https://matrix-org.github.io/synapse/latest/federate.html>`_.
For more detailed setup instructions, please see further down this document.
.. _reverse-proxy:
About Matrix
============
Using a reverse proxy with Synapse
----------------------------------
Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
which handle:
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
For information on configuring one, see `the reverse proxy docs
<https://matrix-org.github.io/synapse/latest/reverse_proxy.html>`_.
- Creating and managing fully distributed chat rooms with no
single points of control or failure
- Eventually-consistent cryptographically secure[1] synchronisation of room
state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
end-to-end encryption[2]
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls
Upgrading an existing Synapse
-----------------------------
These APIs are intended to be implemented on a wide range of servers, services
and clients, letting developers build messaging and VoIP functionality on top of
the entirely open Matrix ecosystem rather than using closed or proprietary
solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.
The instructions for upgrading Synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of Synapse.
Synapse is a reference "homeserver" implementation of Matrix from the core
development team at matrix.org, written in Python/Twisted for clarity and
simplicity. It is intended to showcase the concept of Matrix and let folks see
the spec in the context of a codebase and let you run your own homeserver and
generally help bootstrap the ecosystem.
.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver which stores all their personal chat history and user
account information - much as a mail client connects through to an IMAP/SMTP
server. Just like email, you can either run your own Matrix homeserver and
control and own your own communications and history or use one hosted by someone
else (e.g. matrix.org) - there is no single point of control or mandatory
service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.
Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
web client demo implemented in AngularJS) and cmdclient (a basic Python
commandline utility which lets you easily see what the JSON APIs are up to).
We'd like to invite you to take a look at the Matrix spec, try to run a
homeserver, and join the existing Matrix chatrooms already out there, experiment
with the APIs and the demo clients, and let us know your thoughts at
https://github.com/matrix-org/synapse/issues or at matrix@matrix.org.
Thanks for trying Matrix!
[1] Cryptographic signing of messages isn't turned on yet
[2] End-to-end encryption is currently in development
Platform dependencies
---------------------
Homeserver Installation
=======================
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`deprecation policy <https://matrix-org.github.io/synapse/latest/deprecation_policy.html>`_
for more details.
First, the dependencies need to be installed. Start by installing
'python2.7-dev' and the various tools of the compiler toolchain.
N.B. synapse requires python 2.x where x >= 7
Installing prerequisites on ubuntu::
$ sudo apt-get install build-essential python2.7-dev libffi-dev
Installing prerequisites on Mac OS X::
$ xcode-select --install
The homeserver has a number of external dependencies that are easiest
to install by making setup.py do so, in --user mode::
$ python setup.py develop --user
You'll need a version of setuptools new enough to know about git, so you
may need to also run:
$ sudo apt-get install python-pip
$ sudo pip install --upgrade setuptools
If you don't have access to github, then you may need to install ``syutil``
manually by checking it out and running ``python setup.py develop --user`` on it
too.
If you get errors about ``sodium.h`` being missing, you may also need to
manually install a newer PyNaCl via pip as setuptools installs an old one. Or
you can check PyNaCl out of git directly (https://github.com/pyca/pynacl) and
install it. Installing PyNaCl using pip may also work (remember to remove any
other versions installed by setuptools in, for example, ~/.local/lib).
This will run a process of downloading and installing into your
user's .local/lib directory all of the required dependencies that are
missing.
Once this is done, you may wish to run the homeserver's unit tests, to
check that everything is installed as it should be::
$ python setup.py test
This should end with a 'PASSED' result::
Ran 143 tests in 0.601s
PASSED (successes=143)
Security note
-------------
Upgrading an existing homeserver
================================
Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.
Before upgrading an existing homeserver to a new version, please refer to
UPGRADE.rst for any additional instructions.
.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
Setting up Federation
=====================
Whilst we make a reasonable effort to mitigate against XSS attacks (for
instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
domain hosting other web applications. This especially applies to sharing
the domain with Matrix web clients and other sensitive applications like
webmail. See
https://developer.github.com/changes/2014-04-25-user-content-security for more
information.
In order for other homeservers to send messages to your server, it will need to
be publicly visible on the internet, and they will need to know its host name.
You have two choices here, which will influence the form of your Matrix user
IDs:
.. _CSP: https://github.com/matrix-org/synapse/pull/1021
1) Use the machine's own hostname as available on public DNS in the form of its
A or AAAA records. This is easier to set up initially, perhaps for testing,
but lacks the flexibility of SRV.
Ideally, the homeserver should not simply be on a different subdomain, but on
a completely different `registered domain`_ (also known as top-level site or
eTLD+1). This is because `some attacks`_ are still possible as long as the two
applications share the same registered domain.
2) Set up a SRV record for your domain name. This requires you create a SRV
record in DNS, but gives the flexibility to run the server on your own
choice of TCP port, on a machine that might not be the same name as the
domain name.
.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
For the first form, simply pass the required hostname (of the machine) as the
--host parameter::
.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
$ python synapse/app/homeserver.py \
--server-name machine.my.domain.name \
--config-path homeserver.config \
--generate-config
$ python synapse/app/homeserver.py --config-path homeserver.config
To illustrate this with an example, if your Element Web or other sensitive web
application is hosted on ``A.example1.com``, you should ideally host Synapse on
``example2.com``. Some amount of protection is offered by hosting on
``B.example1.com`` instead, so this is also acceptable in some scenarios.
However, you should *not* host your Synapse on ``A.example1.com``.
For the second form, first create your SRV record and publish it in DNS. This
needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::
Note that all of the above refers exclusively to the domain used in Synapse's
``public_baseurl`` setting. In particular, it has no bearing on the domain
mentioned in MXIDs hosted on that server.
_matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::
$ python synapse/app/homeserver.py \
--server-name YOURDOMAIN \
--bind-port 8448 \
--config-path homeserver.config \
--generate-config
$ python synapse/app/homeserver.py --config-path homeserver.config
Testing a new installation
==========================
You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
For the initial alpha release, the homeserver is not speaking TLS for
either client-server or server-server traffic for ease of debugging. We have
also not spent any time yet getting the homeserver to run behind loadbalancers.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
Running a Demo Federation of Homeservers
----------------------------------------
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
If you want to get up and running quickly with a trio of homeservers in a
private federation (``localhost:8080``, ``localhost:8081`` and
``localhost:8082``), which you can then access through the webclient running at
http://localhost:8080, simply run::
If all goes well you should at least be able to log in, create a room, and
start sending messages.
$ demo/start.sh
.. _`client-user-reg`:
Registering a new user from a client
------------------------------------
By default, registration of new users via Matrix clients is disabled. To enable
it:
1. In the
`registration config section <https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_
set ``enable_registration: true`` in ``homeserver.yaml``.
2. Then **either**:
a. set up a `CAPTCHA <https://matrix-org.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.
We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
the public internet. Without it, anyone can freely register accounts on your homeserver.
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
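As a rough illustration (a minimal sketch only, assuming you accept the risk
described above and skip the CAPTCHA), the relevant ``homeserver.yaml`` fragment
would be::

    enable_registration: true
    enable_registration_without_verification: true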
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::
@localpart:my.domain.name
(pronounced "at localpart on my dot domain dot name").
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
Troubleshooting and support
Running The Demo Web Client
===========================
The `Admin FAQ <https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html>`_
includes tips on dealing with some common problems. For more details, see
`Synapse's wider documentation <https://matrix-org.github.io/synapse/latest/>`_.
The homeserver runs a web client by default at http://localhost:8080.
For additional support installing or managing Synapse, please ask in the community
support room |room|_ (from a matrix.org account if necessary). We do not use GitHub
issues for support requests, only for bug reports and feature requests.
If this is the first time you have used the client from that browser (it uses
HTML5 local storage to remember its config), you will need to log in to your
account. If you don't yet have an account, because you've just started the
homeserver for the first time, then you'll need to register one.
.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org
.. |docs| replace:: ``docs``
.. _docs: docs
Registering A New Account
-------------------------
Your new user name will be formed partly from the hostname your server is
running as, and partly from a localpart you specify when you create the
account. Your name will take the form of::
@localpart:my.domain.here
(pronounced "at localpart on my dot domain dot here")
Specify your desired localpart in the topmost box of the "Register for an
account" form, and click the "Register" button. Hostnames can contain ports if
required due to lack of SRV records (e.g. @matthew:localhost:8080 on an internal
synapse sandbox running on localhost)
Logging In To An Existing Account
---------------------------------
Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.
Identity Servers
================
Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.
The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
given Matrix user is very security-sensitive, as there is obvious risk of spam
if it is too easy to sign up for Matrix accounts or harvest 3PID data. Meanwhile
the job of publishing the end-to-end encryption public keys for Matrix users is
also very security-sensitive for similar reasons.
**They are not where accounts or credentials are stored - these live on home
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**
Therefore the role of managing trusted identity in the Matrix ecosystem is
farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
Identity Servers' such as ``sydent``, whose role is purely to authenticate and
track 3PID logins and publish end-user public keys.
This process is very security-sensitive, as there is obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
the role of managing trusted identity in the Matrix ecosystem is farmed out to
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
is purely to authenticate and track 3PID logins and publish end-user public
keys.
You can host your own copy of Sydent, but this will prevent you reaching other
users in the Matrix ecosystem via their email address, and prevent them finding
you. We therefore recommend that you use one of the centralised identity servers
at ``https://matrix.org`` or ``https://vector.im`` for now.
To reiterate: the Identity server will only be used if you choose to associate
an email address with your account, or send an invite to another user via their
email address.
It's currently early days for identity servers as Matrix is not yet using 3PIDs
as the primary means of identity and E2E encryption is not complete. As such,
we're not yet running an identity server in public.
Development
===========
Where's the spec?!
==================
We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:
* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
* `notes on Synapse's implementation details <https://matrix-org.github.io/synapse/latest/development/internal_documentation/index.html>`_, and
* `how we use git <https://matrix-org.github.io/synapse/latest/development/git.html>`_.
Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
For now, please go spelunking in the ``docs/`` directory to find out.
.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
Building Internal API Documentation
===================================
.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
:alt: (discuss development on #synapse-dev:matrix.org)
:target: https://matrix.to/#/#synapse-dev:matrix.org
Before building the internal API documentation, install sphinx and
sphinxcontrib-napoleon::
.. |documentation| image:: https://img.shields.io/badge/documentation-%E2%9C%93-success
:alt: (Rendered documentation on GitHub Pages)
:target: https://matrix-org.github.io/synapse/latest/
$ pip install sphinx
$ pip install sphinxcontrib-napoleon
.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
:alt: (check license in LICENSE file)
:target: LICENSE
Building internal API documentation::
.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse
:alt: (latest version released on PyPi)
:target: https://pypi.org/project/matrix-synapse
$ python setup.py build_sphinx
.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse
:alt: (supported python versions)
:target: https://pypi.org/project/matrix-synapse


@@ -1,7 +1,50 @@
Upgrading Synapse
=================
Upgrading to v0.2.0
===================
This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
Please update your links.
The home server now requires setting up of SSL config before it can run. To
automatically generate default config use::
The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
$ python synapse/app/homeserver.py \
--server-name machine.my.domain.name \
--bind-port 8448 \
--config-path homeserver.config \
--generate-config
This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::
$ python synapse/app/homeserver.py --config-path homeserver.config
See the README.rst for more information.
Also note that some config options have been renamed, including:
- "host" to "server-name"
- "database" to "database-path"
- "port" to "bind-port" and "unsecure-port"
Upgrading to v0.0.1
===================
This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of and room alias mappings.
Before running the command, the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.:
./database-prepare-for-0.0.1.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can rejoin remote rooms either by using
room aliases or by being reinvited. Alternatively, if any other homeserver sends
a message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.

VERSION Normal file

@@ -0,0 +1 @@
0.2.0

WISHLIST.rst Normal file

@@ -0,0 +1,7 @@
Broad-sweeping stuff which would be nice to have
================================================
- Additional SQL backends beyond sqlite
- homeserver implementation in go
- homeserver implementation in node.js
- client SDKs


@@ -1,47 +0,0 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false
# The directory that documentation files are stored in
src = "docs"
[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false
[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true
# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"
# The path that the docs are hosted on
site-url = "/synapse/"
# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"

View File

@@ -1,23 +0,0 @@
# A build script for poetry that adds the rust extension.
import os
from typing import Any, Dict
from setuptools_rust import Binding, RustExtension
def build(setup_kwargs: Dict[str, Any]) -> None:
original_project_dir = os.path.dirname(os.path.realpath(__file__))
cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
extension = RustExtension(
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False

View File

@@ -1 +0,0 @@
!.gitignore

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,11 @@
# limitations under the License.
""" Starts a synapse client console. """
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient
import argparse
import binascii
import cmd
import getpass
import json
@@ -24,23 +27,21 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
from typing import Optional
import urlparse
from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
# TODO: The concept of trusted identity servers has been deprecated. This option and checks
# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]
TRUSTED_ID_SERVERS = [
'localhost:8001'
]
class SynapseCmd(cmd.Cmd):
"""Basic synapse command-line processor.
This processes commands from the user and calls the relevant HTTP methods.
@@ -57,7 +58,7 @@ class SynapseCmd(cmd.Cmd):
"token": token,
"verbose": "on",
"complete_usernames": "on",
"send_delivery_receipts": "on",
"send_delivery_receipts": "on"
}
self.path_prefix = "/_matrix/client/api/v1"
self.event_stream_token = "END"
@@ -92,7 +93,7 @@ class SynapseCmd(cmd.Cmd):
return self.config["user"].split(":")[1]
def do_config(self, line):
"""Show the config for this client: "config"
""" Show the config for this client: "config"
Edit a key value mapping: "config key value" e.g. "config token 1234"
Config variables:
user: The username to auth with.
@@ -108,7 +109,7 @@ class SynapseCmd(cmd.Cmd):
by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
"""
if len(line) == 0:
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
return
try:
@@ -118,11 +119,12 @@ class SynapseCmd(cmd.Cmd):
config_rules = [ # key, valid_values
("verbose", ["on", "off"]),
("complete_usernames", ["on", "off"]),
("send_delivery_receipts", ["on", "off"]),
("send_delivery_receipts", ["on", "off"])
]
for key, valid_vals in config_rules:
if key == args["key"] and args["val"] not in valid_vals:
print("%s value must be one of %s" % (args["key"], valid_vals))
print "%s value must be one of %s" % (args["key"],
valid_vals)
return
# toggle the http client verbosity
@@ -131,11 +133,11 @@ class SynapseCmd(cmd.Cmd):
# assign the new config
self.config[args["key"]] = args["val"]
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
save_config(self.config)
except Exception as e:
print(e)
print e
def do_register(self, line):
"""Registers for a new account: "register <userid> <noupdate>"
@@ -143,50 +145,36 @@ class SynapseCmd(cmd.Cmd):
<noupdate> : Do not automatically clobber config values.
"""
args = self._parse(line, ["userid", "noupdate"])
path = "/register"
password = None
pwd = None
pwd2 = "_"
while pwd != pwd2:
pwd = getpass.getpass("Type a password for this user: ")
pwd = getpass.getpass("(Optional) Type a password for this user: ")
if len(pwd) == 0:
print "Not using a password for this user."
break
pwd2 = getpass.getpass("Retype the password: ")
if pwd != pwd2 or len(pwd) == 0:
print("Password mismatch.")
pwd = None
if pwd != pwd2:
print "Password mismatch."
else:
password = pwd
body = {"type": "m.login.password"}
body = {}
if "userid" in args:
body["user"] = args["userid"]
body["user_id"] = args["userid"]
if password:
body["password"] = password
reactor.callFromThread(self._do_register, body, "noupdate" not in args)
reactor.callFromThread(self._do_register, "POST", path, body,
"noupdate" not in args)
@defer.inlineCallbacks
def _do_register(self, data, update_config):
# check the registration flows
url = self._url() + "/register"
json_res = yield self.http_client.do_request("GET", url)
print(json.dumps(json_res, indent=4))
passwordFlow = None
for flow in json_res["flows"]:
if flow["type"] == "m.login.recaptcha" or (
"stages" in flow and "m.login.recaptcha" in flow["stages"]
):
print("Unable to register: Home server requires captcha.")
return
if flow["type"] == "m.login.password" and "stages" not in flow:
passwordFlow = flow
break
if not passwordFlow:
return
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json.dumps(json_res, indent=4))
def _do_register(self, method, path, data, update_config):
url = self._url() + path
json_res = yield self.http_client.do_request(method, url, data=data)
print json.dumps(json_res, indent=4)
if update_config and "user_id" in json_res:
self.config["user"] = json_res["user_id"]
self.config["token"] = json_res["access_token"]
@@ -198,7 +186,9 @@ class SynapseCmd(cmd.Cmd):
"""
try:
args = self._parse(line, ["user_id"], force_keys=True)
can_login = threads.blockingCallFromThread(reactor, self._check_can_login)
can_login = threads.blockingCallFromThread(
reactor,
self._check_can_login)
if can_login:
p = getpass.getpass("Enter your password: ")
user = args["user_id"]
@@ -206,25 +196,29 @@ class SynapseCmd(cmd.Cmd):
domain = self._domain()
if domain:
user = "@" + user + ":" + domain
reactor.callFromThread(self._do_login, user, p)
# print " got %s " % p
#print " got %s " % p
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_login(self, user, password):
path = "/login"
data = {"user": user, "password": password, "type": "m.login.password"}
data = {
"user": user,
"password": password,
"type": "m.login.password"
}
url = self._url() + path
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json_res)
print json_res
if "access_token" in json_res:
self.config["user"] = user
self.config["token"] = json_res["access_token"]
save_config(self.config)
print("Login successful.")
print "Login successful."
@defer.inlineCallbacks
def _check_can_login(self):
@@ -233,19 +227,18 @@ class SynapseCmd(cmd.Cmd):
# submitting!
url = self._url() + path
json_res = yield self.http_client.do_request("GET", url)
print(json_res)
print json_res
if "flows" not in json_res:
print("Failed to find any login flows.")
print "Failed to find any login flows."
defer.returnValue(False)
flow = json_res["flows"][0] # assume first is the one we want.
if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
flow = json_res["flows"][0] # assume first is the one we want.
if ("type" not in flow or "m.login.password" != flow["type"] or
"stages" in flow):
fallback_url = self._url() + "/login/fallback"
print(
"Unable to login via the command line client. Please visit "
"%s to login." % fallback_url
)
print ("Unable to login via the command line client. Please visit "
"%s to login." % fallback_url)
defer.returnValue(False)
defer.returnValue(True)
@@ -255,34 +248,21 @@ class SynapseCmd(cmd.Cmd):
<clientSecret> A string of characters generated when requesting an email that you'll supply in subsequent calls to identify yourself
<sendAttempt> The number of times the user has requested an email. Leave this the same between requests to retry the request at the transport level. Increment it to request that the email be sent again.
"""
args = self._parse(line, ["address", "clientSecret", "sendAttempt"])
args = self._parse(line, ['address', 'clientSecret', 'sendAttempt'])
postArgs = {
"email": args["address"],
"clientSecret": args["clientSecret"],
"sendAttempt": args["sendAttempt"],
}
postArgs = {'email': args['address'], 'clientSecret': args['clientSecret'], 'sendAttempt': args['sendAttempt']}
reactor.callFromThread(self._do_emailrequest, postArgs)
@defer.inlineCallbacks
def _do_emailrequest(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/requestToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/requestToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
if "sid" in json_res:
print("Token sent. Your session ID is %s" % (json_res["sid"]))
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
if 'sid' in json_res:
print "Token sent. Your session ID is %s" % (json_res['sid'])
def do_emailvalidate(self, line):
"""Validate and associate a third party ID
@@ -290,58 +270,39 @@ class SynapseCmd(cmd.Cmd):
<token> The token sent to your third party identifier address
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "token", "clientSecret"])
args = self._parse(line, ['sid', 'token', 'clientSecret'])
postArgs = {
"sid": args["sid"],
"token": args["token"],
"clientSecret": args["clientSecret"],
}
postArgs = { 'sid' : args['sid'], 'token' : args['token'], 'clientSecret': args['clientSecret'] }
reactor.callFromThread(self._do_emailvalidate, postArgs)
@defer.inlineCallbacks
def _do_emailvalidate(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/submitToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/submitToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_3pidbind(self, line):
"""Validate and associate a third party ID
<sid> The session ID (sid) given to you in the response to requestToken
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "clientSecret"])
args = self._parse(line, ['sid', 'clientSecret'])
postArgs = {"sid": args["sid"], "clientSecret": args["clientSecret"]}
postArgs["mxid"] = self.config["user"]
postArgs = { 'sid' : args['sid'], 'clientSecret': args['clientSecret'] }
postArgs['mxid'] = self.config["user"]
reactor.callFromThread(self._do_3pidbind, postArgs)
@defer.inlineCallbacks
def _do_3pidbind(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
url = self._identityServerUrl()+"/_matrix/identity/api/v1/3pid/bind"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_join(self, line):
"""Joins a room: "join <roomid>" """
@@ -349,7 +310,7 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "join", self._usr())
except Exception as e:
print(e)
print e
def do_joinalias(self, line):
try:
@@ -357,34 +318,36 @@ class SynapseCmd(cmd.Cmd):
path = "/join/%s" % urllib.quote(args["roomname"])
reactor.callFromThread(self._run_and_pprint, "POST", path, {})
except Exception as e:
print(e)
print e
def do_topic(self, line):
""" "topic [set|get] <roomid> [<newtopic>]"
""""topic [set|get] <roomid> [<newtopic>]"
Set the topic for a room: topic set <roomid> <newtopic>
Get the topic for a room: topic get <roomid>
"""
try:
args = self._parse(line, ["action", "roomid", "topic"])
if "action" not in args or "roomid" not in args:
print("Must specify set|get and a room ID.")
print "Must specify set|get and a room ID."
return
if args["action"].lower() not in ["set", "get"]:
print("Must specify set|get, not %s" % args["action"])
print "Must specify set|get, not %s" % args["action"]
return
path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
if args["action"].lower() == "set":
if "topic" not in args:
print("Must specify a new topic.")
print "Must specify a new topic."
return
body = {"topic": args["topic"]}
body = {
"topic": args["topic"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body)
elif args["action"].lower() == "get":
reactor.callFromThread(self._run_and_pprint, "GET", path)
except Exception as e:
print(e)
print e
def do_invite(self, line):
"""Invite a user to a room: "invite <userid> <roomid>" """
@@ -395,66 +358,49 @@ class SynapseCmd(cmd.Cmd):
reactor.callFromThread(self._do_invite, args["roomid"], user_id)
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_invite(self, roomid, userstring):
if not userstring.startswith("@") and self._is_on("complete_usernames"):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
if (not userstring.startswith('@') and
self._is_on("complete_usernames")):
url = self._identityServerUrl()+"/_matrix/identity/api/v1/lookup"
json_res = yield self.http_client.do_request(
"GET", url, qparams={"medium": "email", "address": userstring}
)
json_res = yield self.http_client.do_request("GET", url, qparams={'medium':'email','address':userstring})
mxid = None
if "mxid" in json_res and "signatures" in json_res:
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/pubkey/ed25519"
)
if 'mxid' in json_res and 'signatures' in json_res:
url = self._identityServerUrl()+"/_matrix/identity/api/v1/pubkey/ed25519"
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
pubKey = decode_verify_key_bytes(
NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
)
if 'public_key' in pubKeyObj:
pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
else:
print("No public key found in pubkey response!")
print "No public key found in pubkey response!"
sigValid = False
if pubKey:
for signame in json_res["signatures"]:
for signame in json_res['signatures']:
if signame not in TRUSTED_ID_SERVERS:
print(
"Ignoring signature from untrusted server %s"
% (signame)
)
print "Ignoring signature from untrusted server %s" % (signame)
else:
try:
verify_signed_json(json_res, signame, pubKey)
sigValid = True
print(
"Mapping %s -> %s correctly signed by %s"
% (userstring, json_res["mxid"], signame)
)
print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
break
except SignatureVerifyException as e:
print("Invalid signature from %s" % (signame))
print(e)
print "Invalid signature from %s" % (signame)
print e
if sigValid:
print("Resolved 3pid %s to %s" % (userstring, json_res["mxid"]))
mxid = json_res["mxid"]
print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
mxid = json_res['mxid']
else:
print(
"Got association for %s but couldn't verify signature"
% (userstring)
)
print "Got association for %s but couldn't verify signature" % (userstring)
if not mxid:
mxid = "@" + userstring + ":" + self._domain()
@@ -467,17 +413,18 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "leave", self._usr())
except Exception as e:
print(e)
print e
def do_send(self, line):
"""Sends a message. "send <roomid> <body>" """
args = self._parse(line, ["roomid", "body"])
txn_id = "txn%s" % int(time.time())
path = "/rooms/%s/send/m.room.message/%s" % (
urllib.quote(args["roomid"]),
txn_id,
)
body_json = {"msgtype": "m.text", "body": args["body"]}
path = "/rooms/%s/send/m.room.message/%s" % (urllib.quote(args["roomid"]),
txn_id)
body_json = {
"msgtype": "m.text",
"body": args["body"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body_json)
def do_list(self, line):
@@ -490,11 +437,11 @@ class SynapseCmd(cmd.Cmd):
"list messages <roomid> from=END&to=START&limit=3"
"""
args = self._parse(line, ["type", "roomid", "qp"])
if "type" not in args or "roomid" not in args:
print("Must specify type and room ID.")
if not "type" in args or not "roomid" in args:
print "Must specify type and room ID."
return
if args["type"] not in ["members", "messages"]:
print("Unrecognised type: %s" % args["type"])
print "Unrecognised type: %s" % args["type"]
return
room_id = args["roomid"]
path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -505,11 +452,12 @@ class SynapseCmd(cmd.Cmd):
try:
key_value = key_value_str.split("=")
qp[key_value[0]] = key_value[1]
except Exception:
print("Bad query param: %s" % key_value)
except:
print "Bad query param: %s" % key_value
return
reactor.callFromThread(self._run_and_pprint, "GET", path, query_params=qp)
reactor.callFromThread(self._run_and_pprint, "GET", path,
query_params=qp)
def do_create(self, line):
"""Creates a room.
@@ -545,22 +493,14 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["method", "path", "data"])
# sanity check
if "method" not in args or "path" not in args:
print("Must specify path and method.")
print "Must specify path and method."
return
args["method"] = args["method"].upper()
valid_methods = [
"PUT",
"GET",
"POST",
"DELETE",
"XPUT",
"XGET",
"XPOST",
"XDELETE",
]
valid_methods = ["PUT", "GET", "POST", "DELETE",
"XPUT", "XGET", "XPOST", "XDELETE"]
if args["method"] not in valid_methods:
print("Unsupported method: %s" % args["method"])
print "Unsupported method: %s" % args["method"]
return
if "data" not in args:
@@ -569,7 +509,7 @@ class SynapseCmd(cmd.Cmd):
try:
args["data"] = json.loads(args["data"])
except Exception as e:
print("Data is not valid JSON. %s" % e)
print "Data is not valid JSON. %s" % e
return
qp = {"access_token": self._tok()}
@@ -582,16 +522,13 @@ class SynapseCmd(cmd.Cmd):
parsed_url = urlparse.urlparse(args["path"])
qp.update(urlparse.parse_qs(parsed_url.query))
args["path"] = parsed_url.path
except Exception:
except:
pass
reactor.callFromThread(
self._run_and_pprint,
args["method"],
args["path"],
args["data"],
query_params=qp,
)
reactor.callFromThread(self._run_and_pprint, args["method"],
args["path"],
args["data"],
query_params=qp)
def do_stream(self, line):
"""Stream data from the server: "stream <longpoll timeout ms>" """
@@ -601,31 +538,26 @@ class SynapseCmd(cmd.Cmd):
try:
timeout = int(args["timeout"])
except ValueError:
print("Timeout must be in milliseconds.")
print "Timeout must be in milliseconds."
return
reactor.callFromThread(self._do_event_stream, timeout)
@defer.inlineCallbacks
def _do_event_stream(self, timeout):
res = yield defer.ensureDeferred(
self.http_client.get_json(
res = yield self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
)
print(json.dumps(res, indent=4))
"from": self.event_stream_token
})
print json.dumps(res, indent=4)
if "chunk" in res:
for event in res["chunk"]:
if (
event["type"] == "m.room.message"
and self._is_on("send_delivery_receipts")
and event["user_id"] != self._usr()
): # not sent by us
if (event["type"] == "m.room.message" and
self._is_on("send_delivery_receipts") and
event["user_id"] != self._usr()): # not sent by us
self._send_receipt(event, "d")
# update the position in the stream
@@ -633,28 +565,18 @@ class SynapseCmd(cmd.Cmd):
self.event_stream_token = res["end"]
def _send_receipt(self, event, feedback_type):
path = "/rooms/%s/messages/%s/%s/feedback/%s/%s" % (
urllib.quote(event["room_id"]),
event["user_id"],
event["msg_id"],
self._usr(),
feedback_type,
)
path = ("/rooms/%s/messages/%s/%s/feedback/%s/%s" %
(urllib.quote(event["room_id"]), event["user_id"], event["msg_id"],
self._usr(), feedback_type))
data = {}
reactor.callFromThread(
self._run_and_pprint,
"PUT",
path,
data=data,
alt_text="Sent receipt for %s" % event["msg_id"],
)
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data,
alt_text="Sent receipt for %s" % event["msg_id"])
def _do_membership_change(self, roomid, membership, userid):
path = "/rooms/%s/state/m.room.member/%s" % (
urllib.quote(roomid),
urllib.quote(userid),
)
data = {"membership": membership}
path = "/rooms/%s/state/m.room.member/%s" % (urllib.quote(roomid), urllib.quote(userid))
data = {
"membership": membership
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
def do_displayname(self, line):
@@ -690,7 +612,7 @@ class SynapseCmd(cmd.Cmd):
self._do_presence_state(2, line)
def _parse(self, line, keys, force_keys=False):
"""Parses the given line.
""" Parses the given line.
Args:
line : The line to parse
@@ -707,21 +629,16 @@ class SynapseCmd(cmd.Cmd):
for i, arg in enumerate(line_args):
for config_key in self.config:
if ("$" + config_key) in arg:
arg = arg.replace("$" + config_key, self.config[config_key])
arg = arg.replace("$" + config_key,
self.config[config_key])
line_args[i] = arg
return dict(zip(keys, line_args))
@defer.inlineCallbacks
def _run_and_pprint(
self,
method,
path,
data=None,
query_params: Optional[dict] = None,
alt_text=None,
):
"""Runs an HTTP request and pretty prints the output.
def _run_and_pprint(self, method, path, data=None,
query_params={"access_token": None}, alt_text=None):
""" Runs an HTTP request and pretty prints the output.
Args:
method: HTTP method
@@ -729,37 +646,35 @@ class SynapseCmd(cmd.Cmd):
data: Raw JSON data if any
query_params: dict of query parameters to add to the url
"""
query_params = query_params or {"access_token": None}
url = self._url() + path
if "access_token" in query_params:
query_params["access_token"] = self._tok()
json_res = yield self.http_client.do_request(
method, url, data=data, qparams=query_params
)
json_res = yield self.http_client.do_request(method, url,
data=data,
qparams=query_params)
if alt_text:
print(alt_text)
print alt_text
else:
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
def save_config(config):
with open(CONFIG_JSON, "w") as out:
with open(CONFIG_JSON, 'w') as out:
json.dump(config, out)
def main(server_url, identity_server_url, username, token, config_path):
print("Synapse command line client")
print("===========================")
print("Server: %s" % server_url)
print("Type 'help' to get started.")
print("Close this console with CTRL+C then CTRL+D.")
print "Synapse command line client"
print "==========================="
print "Server: %s" % server_url
print "Type 'help' to get started."
print "Close this console with CTRL+C then CTRL+D."
if not username or not token:
print("- 'register <username>' - Register an account")
print("- 'stream' - Connect to the event stream")
print("- 'create <roomid>' - Create a room")
print("- 'send <roomid> <message>' - Send a message")
print "- 'register <username>' - Register an account"
print "- 'stream' - Connect to the event stream"
print "- 'create <roomid>' - Create a room"
print "- 'send <roomid> <message>' - Send a message"
http_client = TwistedHttpClient()
# the command line client
@@ -769,14 +684,14 @@ def main(server_url, identity_server_url, username, token, config_path):
global CONFIG_JSON
CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
try:
with open(config_path, "r") as config:
with open(config_path, 'r') as config:
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
except Exception:
except:
pass
print("Loaded config from %s" % config_path)
except Exception:
print "Loaded config from %s" % config_path
except:
pass
# Twisted-specific: Runs the command processor in Twisted's event loop
@@ -786,37 +701,27 @@ def main(server_url, identity_server_url, username, token, config_path):
reactor.run()
if __name__ == "__main__":
if __name__ == '__main__':
parser = argparse.ArgumentParser("Starts a synapse client.")
parser.add_argument(
"-s",
"--server",
dest="server",
default="http://localhost:8008",
help="The URL of the home server to talk to.",
)
"-s", "--server", dest="server", default="http://localhost:8008",
help="The URL of the home server to talk to.")
parser.add_argument(
"-i",
"--identity-server",
dest="identityserver",
default="http://localhost:8090",
help="The URL of the identity server to talk to.",
)
"-i", "--identity-server", dest="identityserver", default="http://localhost:8090",
help="The URL of the identity server to talk to.")
parser.add_argument(
"-u", "--username", dest="username", help="Your username on the server."
)
parser.add_argument("-t", "--token", dest="token", help="Your access token.")
"-u", "--username", dest="username",
help="Your username on the server.")
parser.add_argument(
"-c",
"--config",
dest="config",
default=CONFIG_JSON,
help="The location of the config.json file to read from.",
)
"-t", "--token", dest="token",
help="Your access token.")
parser.add_argument(
"-c", "--config", dest="config", default=CONFIG_JSON,
help="The location of the config.json file to read from.")
args = parser.parse_args()
if not args.server:
print("You must supply a server URL to communicate with.")
print "You must supply a server URL to communicate with."
parser.print_help()
sys.exit(1)

View File

@@ -1,4 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,21 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from pprint import pformat
from typing import Optional
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor
from pprint import pformat
import json
import urllib
class HttpClient:
"""Interface for talking json over http"""
class HttpClient(object):
""" Interface for talking json over http
"""
def put_json(self, url, data):
"""Sends the specifed json data using PUT
""" Sends the specifed json data using PUT
Args:
url (str): The URL to PUT data to.
@@ -34,13 +36,15 @@ class HttpClient:
the request body. This will be encoded as JSON.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
""" Get's some json from the given host homeserver and path
Args:
url (str): The URL to GET data from.
@@ -50,14 +54,16 @@ class HttpClient:
and *not* a string.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
class TwistedHttpClient(HttpClient):
"""Wrapper around the twisted HTTP client api.
""" Wrapper around the twisted HTTP client api.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -70,7 +76,9 @@ class TwistedHttpClient(HttpClient):
@defer.inlineCallbacks
def put_json(self, url, data):
response = yield self._create_put_request(
url, data, headers_dict={"Content-Type": ["application/json"]}
url,
data,
headers_dict={"Content-Type": ["application/json"]}
)
body = yield readBody(response)
defer.returnValue((response.code, body))
@@ -85,46 +93,45 @@ class TwistedHttpClient(HttpClient):
body = yield readBody(response)
defer.returnValue(json.loads(body))
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a PUT request"""
headers_dict = headers_dict or {}
def _create_put_request(self, url, json_data, headers_dict={}):
""" Wrapper of _create_request to issue a PUT request
"""
if "Content-Type" not in headers_dict:
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
raise defer.error(
RuntimeError("Must include Content-Type header for PUTs"))
return self._create_request(
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
"PUT",
url,
producer=_JsonProducer(json_data),
headers_dict=headers_dict
)
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a GET request"""
return self._create_request("GET", url, headers_dict=headers_dict or {})
def _create_get_request(self, url, headers_dict={}):
""" Wrapper of _create_request to issue a GET request
"""
return self._create_request(
"GET",
url,
headers_dict=headers_dict
)
@defer.inlineCallbacks
def do_request(
self,
method,
url,
data=None,
qparams=None,
jsonreq=True,
headers: Optional[dict] = None,
):
headers = headers or {}
def do_request(self, method, url, data=None, qparams=None, jsonreq=True, headers={}):
if qparams:
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
if jsonreq:
prod = _JsonProducer(data)
headers["Content-Type"] = ["application/json"]
headers['Content-Type'] = ["application/json"];
else:
prod = _RawProducer(data)
if method in ["POST", "PUT"]:
response = yield self._create_request(
method, url, producer=prod, headers_dict=headers
)
response = yield self._create_request(method, url,
producer=prod,
headers_dict=headers)
else:
response = yield self._create_request(method, url)
@@ -132,33 +139,33 @@ class TwistedHttpClient(HttpClient):
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def _create_request(
self, method, url, producer=None, headers_dict: Optional[dict] = None
):
"""Creates and sends a request to the given url"""
headers_dict = headers_dict or {}
def _create_request(self, method, url, producer=None, headers_dict={}):
""" Creates and sends a request to the given url
"""
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
retries_left = 5
print("%s to %s with headers %s" % (method, url, headers_dict))
print "%s to %s with headers %s" % (method, url, headers_dict)
if self.verbose and producer:
if "password" in producer.data:
temp = producer.data["password"]
producer.data["password"] = "[REDACTED]"
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
producer.data["password"] = temp
else:
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
while True:
try:
response = yield self.agent.request(
method, url.encode("UTF8"), Headers(headers_dict), producer
method,
url.encode("UTF8"),
Headers(headers_dict),
producer
)
break
except Exception as e:
print("uh oh: %s" % e)
print "uh oh: %s" % e
if retries_left:
yield self.sleep(2 ** (5 - retries_left))
retries_left -= 1
@@ -166,8 +173,8 @@ class TwistedHttpClient(HttpClient):
raise e
if self.verbose:
print("Status %s %s" % (response.code, response.phrase))
print(pformat(list(response.headers.getAllRawHeaders())))
print "Status %s %s" % (response.code, response.phrase)
print pformat(list(response.headers.getAllRawHeaders()))
defer.returnValue(response)
def sleep(self, seconds):
@@ -175,8 +182,7 @@ class TwistedHttpClient(HttpClient):
reactor.callLater(seconds, d.callback, seconds)
return d
class _RawProducer:
class _RawProducer(object):
def __init__(self, data):
self.data = data
self.body = data
@@ -192,10 +198,9 @@ class _RawProducer:
def stopProducing(self):
pass
class _JsonProducer:
"""Used by the twisted http client to create the HTTP body from json"""
class _JsonProducer(object):
""" Used by the twisted http client to create the HTTP body from json
"""
def __init__(self, jsn):
self.data = jsn
self.body = json.dumps(jsn).encode("utf8")
@@ -209,4 +214,4 @@ class _JsonProducer:
pass
def stopProducing(self):
pass
pass
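As a usage illustration for the client above, a minimal sketch, assuming the module is importable as ``http`` (which is how the console client in this changeset imports it); the server URL and access token are placeholders:

```python
# Minimal sketch: fetch one batch of events with TwistedHttpClient.get_json.
import json

from twisted.internet import defer, reactor

from http import TwistedHttpClient  # the module shown above


@defer.inlineCallbacks
def fetch_events():
    client = TwistedHttpClient()
    # get_json() passes the args as query-string parameters and returns the
    # decoded JSON body of the response.
    events = yield client.get_json(
        "http://localhost:8008/_matrix/client/api/v1/events",
        {"access_token": "<placeholder token>", "timeout": "5000", "from": "END"},
    )
    print(json.dumps(events, indent=4))
    reactor.stop()


reactor.callWhenRunning(fetch_events)
reactor.run()
```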

View File

@@ -1,10 +0,0 @@
Community Contributions
=======================
Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability,
or backwards compatibility of these projects.
Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in the future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.

View File

@@ -1,32 +0,0 @@
# Synapse Docker
### Configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration.
To generate a fresh `homeserver.yaml`, you can use the `generate` command.
(See the [documentation](../../docker/README.md#generating-a-configuration-file)
for more information.) You will need to specify appropriate values for at least the
`SYNAPSE_SERVER_NAME` and `SYNAPSE_REPORT_STATS` environment variables. For example:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host -e SYNAPSE_REPORT_STATS=yes synapse generate
```
(This will also generate necessary signing keys.)
Then, customize your configuration and run the server:
```
docker-compose up -d
```
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)

View File

@@ -1,66 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.
version: '3'
services:
synapse:
build:
context: ../..
dockerfile: docker/Dockerfile
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry to connect to the database, restart upon
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
volumes:
# You may either store all the files in a local folder
- ./files:/data
# .. or you may split this between different storage points
# - ./files:/data
# - /path/to/ssd:/data/uploads
# - /path/to/large_hdd:/data/media
depends_on:
- db
# In order to expose Synapse, remove one of the following, you might for
# instance expose the TLS port directly:
ports:
- 8448:8448/tcp
# ... or use a reverse proxy, here is an example for traefik:
labels:
# The following lines are valid for Traefik version 1.x:
- traefik.enable=true
- traefik.frontend.rule=Host:my.matrix.Host
- traefik.port=8008
# Alternatively, for Traefik version 2.0:
- traefik.enable=true
- traefik.http.routers.http-synapse.entryPoints=http
- traefik.http.routers.http-synapse.rule=Host(`my.matrix.host`)
- traefik.http.middlewares.https_redirect.redirectscheme.scheme=https
- traefik.http.middlewares.https_redirect.redirectscheme.permanent=true
- traefik.http.routers.http-synapse.middlewares=https_redirect
- traefik.http.routers.https-synapse.entryPoints=https
- traefik.http.routers.https-synapse.rule=Host(`my.matrix.host`)
- traefik.http.routers.https-synapse.service=synapse
- traefik.http.routers.https-synapse.tls=true
- traefik.http.services.synapse.loadbalancer.server.port=8008
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:12-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data
# .. or store them on some high performance storage for better results
# - /path/to/ssd/storage:/var/lib/postgresql/data

View File

@@ -1,111 +0,0 @@
# Setting up Synapse with Workers using Docker Compose
This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
Example worker configuration files can be found [here](workers).
All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
An example Docker Compose file can be found [here](docker-compose.yaml).
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` (after `"/start.py", "run"`).
### Generic Worker Example
```yaml
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
```
### Federation Sender Example
Please note: The federation sender does not receive REST API calls so no exposed ports are required.
```yaml
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse
```
## `homeserver.yaml` Configuration
### Enable Redis
Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
```yaml
redis:
enabled: true
host: redis
port: 6379
# password: <secret_password>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
### Add a replication Listener
Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
```yaml
listeners:
# Other listeners
- port: 9093
type: http
resources:
- names: [replication]
```
This listener is used by the workers for replication and is referred to in worker config files using the following settings:
```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false
federation_sender_instances:
- synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```
## Other Worker types
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.

View File

@@ -1,77 +0,0 @@
networks:
backend:
services:
postgres:
image: postgres:latest
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
networks:
- backend
environment:
POSTGRES_DB: synapse
POSTGRES_USER: synapse_user
POSTGRES_PASSWORD: postgres
POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
redis:
image: redis:latest
restart: unless-stopped
networks:
- backend
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/data:/data:rw
ports:
- 8008:8008
networks:
- backend
environment:
SYNAPSE_CONFIG_DIR: /data
SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
depends_on:
- postgres
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse

View File

@@ -1,8 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_log_config: /data/federation_sender.log.config

View File

@@ -1,15 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8081
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /data/worker.log.config

View File

@@ -1,51 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.
version: 1
formatters:
fmt:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
# example output to console
console:
class: logging.StreamHandler
formatter: fmt
filters: [context]
# example output to file - to enable, edit 'root' config below.
file:
class: logging.handlers.RotatingFileHandler
formatter: fmt
filename: /var/log/synapse/homeserver.log
maxBytes: 100000000
backupCount: 3
filters: [context]
encoding: utf8
root:
level: INFO
handlers: [console] # to use file handler instead, switch to [file]
loggers:
synapse:
level: INFO
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
# example of enabling debugging for a component:
#
# synapse.federation.transport.server:
# level: DEBUG

View File

@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. [contrib/prometheus](../prometheus)

File diff suppressed because it is too large

View File

@@ -1,157 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import sqlite3
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze
def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading from a Synapse SQLite database.
"""
conn = sqlite3.connect(db_name)
sql = "SELECT room_version FROM rooms WHERE room_id = ?"
c = conn.execute(sql, (room_id,))
room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
sql = (
"SELECT json, internal_metadata FROM event_json as j "
"INNER JOIN events as e ON e.event_id = j.event_id "
"WHERE j.room_id = ?"
)
args = [room_id]
if limit:
sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
args.append(limit)
c = conn.execute(sql, args)
events = [
make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
for e in c.fetchall()
]
events.sort(key=lambda e: e.depth)
node_map = {}
state_groups = {}
graph = pydot.Dot(graph_name="Test")
for event in events:
c = conn.execute(
"SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
(event.event_id,),
)
res = c.fetchone()
state_group = res[0] if res else None
if state_group is not None:
state_groups.setdefault(state_group, []).append(event.event_id)
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
content = json.dumps(unfreeze(event.get_dict()["content"]))
label = (
"<"
"<b>%(name)s </b><br/>"
"Type: <b>%(type)s </b><br/>"
"State key: <b>%(state_key)s </b><br/>"
"Content: <b>%(content)s </b><br/>"
"Time: <b>%(time)s </b><br/>"
"Depth: <b>%(depth)s </b><br/>"
"State group: %(state_group)s<br/>"
">"
) % {
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": html.escape(content, quote=True),
"time": t,
"depth": event.depth,
"state_group": state_group,
}
node = pydot.Node(name=event.event_id, label=label)
node_map[event.event_id] = node
graph.add_node(node)
for event in events:
for prev_id in event.prev_event_ids():
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
node_map[prev_id] = end_node
graph.add_node(end_node)
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
for group, event_ids in state_groups.items():
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
for event_id in event_ids:
cluster.add_node(node_map[event_id])
graph.add_subgraph(cluster)
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
graph.write_svg("%s.svg" % file_prefix, prog="dot")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given Synapse SQLite file to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
help="String to prefix output files with",
default="graph_output",
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("db")
parser.add_argument("room")
args = parser.parse_args()
make_graph(args.db, args.room, args.prefix, args.limit)
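A minimal invocation sketch for the helper above, bypassing argparse; the database path, room ID, and limit below are placeholders rather than values from this changeset:

```python
# Hypothetical direct call (run from the same module, or after importing
# make_graph from it); requires pydot and access to a Synapse SQLite database.
make_graph(
    db_name="homeserver.db",              # path to the Synapse SQLite database
    room_id="!abcdefabcdef:example.org",  # room whose event graph to draw
    file_prefix="graph_output",           # writes graph_output.dot and graph_output.svg
    limit=100,                            # only graph the most recent 100 events
)
```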

View File

@@ -1,155 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import pydot
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading line-delimited JSON from a file.
"""
print("Reading lines")
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
# Figure out the room version, assume the first line is the create event.
room_version = KNOWN_ROOM_VERSIONS[
json.loads(lines[0]).get("content", {}).get("room_version")
]
events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
print("Loaded events.")
events.sort(key=lambda e: e.depth)
print("Sorted events")
if limit:
events = events[-int(limit) :]
node_map = {}
graph = pydot.Dot(graph_name="Test")
for event in events:
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
content = content.replace("\n", "<br/>\n")
print(content)
content = []
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, str):
pass
else:
value = json.dumps(value)
content.append(
"<b>%s</b>: %s,"
% (
html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
)
)
content = "<br/>\n".join(content)
print(content)
label = (
"<"
"<b>%(name)s </b><br/>"
"Type: <b>%(type)s </b><br/>"
"State key: <b>%(state_key)s </b><br/>"
"Content: <b>%(content)s </b><br/>"
"Time: <b>%(time)s </b><br/>"
"Depth: <b>%(depth)s </b><br/>"
">"
) % {
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": content,
"time": t,
"depth": event.depth,
}
node = pydot.Node(name=event.event_id, label=label)
node_map[event.event_id] = node
graph.add_node(node)
print("Created Nodes")
for event in events:
for prev_id in event.prev_event_ids():
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
node_map[prev_id] = end_node
graph.add_node(end_node)
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
print("Created edges")
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
print("Created Dot")
graph.write_svg("%s.svg" % file_prefix, prog="dot")
print("Created svg")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by reading "
"from a file with line deliminated events. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
help="String to prefix output files with",
default="graph_output",
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
args = parser.parse_args()
make_graph(args.event_file, args.prefix, args.limit)
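Similarly, a minimal invocation sketch for the file-based variant above. Note that the script expects line-delimited event JSON whose first line is the room's `m.room.create` event, since the room version is read from that event's content; the filename below is a placeholder:

```python
# Hypothetical direct call (run from the same module, or after importing
# make_graph from it). events.json must contain one event per line, with the
# create event on the first line; requires pydot.
make_graph(file_name="events.json", file_prefix="graph_output", limit=50)
```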

View File

@@ -1,48 +0,0 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.
To use it, first install prometheus by following the instructions at
http://prometheus.io/
### for Prometheus v1
Add a new job to the main prometheus.conf file:
```yaml
job: {
name: "synapse"
target_group: {
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
}
}
```
### for Prometheus v2
Add a new job to the main prometheus.yml file:
```yaml
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
# when endpoint uses https:
scheme: "https"
static_configs:
- targets: ["my.server.here:port"]
```
An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
To use `synapse.rules` add
```yaml
rule_files:
- "/PATH/TO/synapse-v2.rules"
```
Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.

View File

@@ -1,378 +0,0 @@
{{ template "head" . }}
{{ template "prom_content_head" . }}
<h1>System Resources</h1>
<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resource_utime"),
expr: "rate(process_cpu_seconds_total[2m]) * 100",
name: "[[job]]-[[index]]",
min: 0,
max: 100,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "%",
yTitle: "CPU Usage"
})
</script>
<h3>Memory</h3>
<div id="process_resident_memory_bytes"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resident_memory_bytes"),
expr: "process_resident_memory_bytes",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "bytes",
yTitle: "Usage"
})
</script>
<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_fds"),
expr: "process_open_fds",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "",
yTitle: "Descriptors"
})
</script>
<h1>Reactor</h1>
<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_total_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
name: "[[job]]-[[index]]",
max: 1,
min: 0,
renderer: "area",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_average_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
})
</script>
<h1>Storage</h1>
<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_query_time"),
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
name: "[[verb]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "queries/s",
yTitle: "Queries"
})
</script>
<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transaction_time"),
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
name: "[[job]]-[[index]] [[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "txn/s",
yTitle: "Transactions"
})
</script>
<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_sec"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transactions_time_sec"),
expr: "rate(synapse_storage_transaction_time_sum[2m])",
name: "[[job]]-[[index]] [[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Average time waiting for database connection</h3>
<div id="synapse_storage_avg_waiting_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_avg_waiting_time"),
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
name: "[[job]]-[[index]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
})
</script>
<h3>Cache request rate</h3>
<div id="synapse_cache_request_rate"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_request_rate"),
expr: "rate(synapse_util_caches_cache:total[2m])",
name: "[[job]]-[[index]] [[name]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "rps",
yTitle: "Cache request rate"
})
</script>
<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_size"),
expr: "synapse_util_caches_cache:size",
name: "[[job]]-[[index]] [[name]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Items"
})
</script>
<h1>Requests</h1>
<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h4>&nbsp;(without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses"),
expr: "rate(synapse_http_server_responses[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses_err"),
expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "CPU Usage"
})
</script>
<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "DB Usage"
})
</script>
<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h1>Federation</h1>
<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_client_sent"),
expr: "rate(synapse_federation_client_sent[2m])",
name: "[[job]]-[[index]] [[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_server_received"),
expr: "rate(synapse_federation_server_received[2m])",
name: "[[job]]-[[index]] [[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_transaction_queue_pending"),
expr: "synapse_federation_transaction_queue_pending",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Units"
})
</script>
<h1>Clients</h1>
<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_listeners"),
expr: "synapse_notifier_listeners",
name: "[[job]]-[[index]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Listeners"
})
</script>
<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_notified_events"),
expr: "rate(synapse_notifier_notified_events[2m])",
name: "[[job]]-[[index]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "events/s",
yTitle: "Event rate"
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}

View File

@@ -1,74 +0,0 @@
groups:
- name: synapse
rules:
###
### Prometheus Console Only
### The following rules are only needed if you use the Prometheus Console
### in contrib/prometheus/consoles/synapse.html
###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus_total + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus_total + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus_total + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
expr: 'synapse_federation_transaction_queue_pending_edus + 0'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
###
### End of 'Prometheus Console Only' rules block
###
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###

View File

@@ -1,18 +0,0 @@
Purge history API examples
==========================
# `purge_history.sh`
A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
the script.
# `purge_remote_media.sh`
A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all old cached remote media.
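Both scripts are thin wrappers around plain HTTP calls to the admin API, so a single purge request can also be issued by hand. A minimal sketch, using the `/_synapse/admin/v1` path from the linked documentation (older deployments, including the scripts below, use the equivalent `/_matrix/client/r0/admin` prefix); `ACCESS_TOKEN`, `ROOM_ID` and `EVENT_ID` are placeholders you must fill in:

```sh
# Purge local message history in one room up to a given event.
curl --header "Authorization: Bearer $ACCESS_TOKEN" \
     -X POST \
     -d '{"delete_local_events": true}' \
     "https://yourserver.tld:8008/_synapse/admin/v1/purge_history/$ROOM_ID/$EVENT_ID"
```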

View File

@@ -1,143 +0,0 @@
#!/usr/bin/env bash
# this script will use the api:
# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
#
# It will purge all messages in a list of rooms up to a certain event
###################################################################################################
# define your domain and admin user
###################################################################################################
# your homeserver's domain:
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"
API_URL="$DOMAIN:8008/_matrix/client/r0"
###################################################################################################
#choose the rooms to prune old messages from (add a free comment at the end)
###################################################################################################
# you can get the room IDs e.g. from your Riot client's "View Source" button on each message
ROOMS_ARRAY=(
'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
)
# ALTERNATIVELY:
# you can select all the rooms that are not encrypted and loop over the result:
# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
# or
# select all rooms with at least 100 members:
# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
# GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc
###################################################################################################
# evaluate the EVENT_ID before which should be pruned
###################################################################################################
# choose a time before which the messages should be pruned:
TIME='12 months ago'
# ALTERNATIVELY:
# a certain time:
# TIME='2016-08-31 23:59:59'
# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
# ALTERNATIVELY:
# prune all messages that are older than 1000 messages ago:
# LAST_MESSAGES=1000
# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"
# ALTERNATIVELY:
# select the EVENT_ID manually:
#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew
###################################################################################################
# make the admin user a server admin in the database with
###################################################################################################
# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
###################################################################################################
# database function
###################################################################################################
sql (){
# for sqlite3:
#sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
# for postgres:
psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}
###################################################################################################
# get an access token
###################################################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
AUTH="Authorization: Bearer $TOKEN"
###################################################################################################
# check, if your TOKEN works. For example this works:
###################################################################################################
# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"
###################################################################################################
# finally start pruning the room:
###################################################################################################
# this will really delete local events, so the messages in the room really
# disappear unless they are restored by remote federation. This is because
# we pass {"delete_local_events":true} to the curl invocation below.
for ROOM in "${ROOMS_ARRAY[@]}"; do
echo "########################################### $(date) ################# "
echo "pruning room: $ROOM ..."
ROOM=${ROOM%#*}
#set -x
echo "check for alias in db..."
# for postgres:
sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
echo "get event..."
# for postgres:
EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
if [ "$EVENT_ID" == "" ]; then
echo "no event $TIME"
else
echo "event: $EVENT_ID"
SLEEP=2
set -x
# call purge
OUT=$(curl --header "$AUTH" -s -d '{"delete_local_events":true}' POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
if [ "$PURGE_ID" == "" ]; then
# probably the history purge is already in progress for $ROOM
: "continuing with next room"
else
while : ; do
# get status of purge and sleep longer each time if still active
sleep $SLEEP
STATUS=$(curl --header "$AUTH" -s -X GET "$API_URL/admin/purge_history_status/$PURGE_ID" |grep status|cut -d'"' -f4)
: "$ROOM --> Status: $STATUS"
[[ "$STATUS" == "active" ]] || break
SLEEP=$((SLEEP + 1))
done
fi
set +x
sleep 1
fi
done
###################################################################################################
# additionally
###################################################################################################
# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
# This can take a very long time (hours) and the server has to be stopped while you do so:
# $ synctl stop
# $ sqlite3 -line homeserver.db "vacuum;"
# $ synctl start
# This can be set so you don't need to run VACUUM manually every time after deleting some rows:
# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
# be cautious, it could make the database somewhat slow if there are a lot of deletions
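# For a postgres-backed server the equivalent step is VACUUM FULL, which also
# locks the tables while it runs, e.g.:
# $ psql --dbname=synapse -c 'VACUUM FULL;'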
exit

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env bash
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"
API_URL="$DOMAIN:8008/_matrix/client/r0"
# choose a time before which the messages should be pruned:
# TIME='2016-08-31 23:59:59'
TIME='12 months ago'
# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
###################################################################################################
# database function
###################################################################################################
sql (){
# for sqlite3:
#sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
# for postgres:
psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}
###############################################################################
# make the admin user a server admin in the database with
###############################################################################
# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
###############################################################################
# get an access token
###############################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
###############################################################################
# check, if your TOKEN works. For example this works:
###############################################################################
# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"
###############################################################################
# optional check size before
###############################################################################
# echo calculate used storage before ...
# du -shc ../.synapse/media_store/*
###############################################################################
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"

View File

@@ -1,57 +0,0 @@
name: matrix-synapse
base: core18
version: git
summary: Reference Matrix homeserver
description: |
Synapse is the reference Matrix homeserver.
Matrix is a federated and decentralised instant messaging and VoIP system.
grade: stable
confinement: strict
apps:
matrix-synapse:
command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
stop-command: synctl -c $SNAP_COMMON stop
plugs: [network-bind, network]
daemon: simple
hash-password:
command: hash_password
generate-config:
command: generate_config
generate-signing-key:
command: generate_signing_key
register-new-matrix-user:
command: register_new_matrix_user
plugs: [network]
synctl:
command: synctl
parts:
matrix-synapse:
source: .
plugin: python
python-version: python3
python-packages:
- '.[all]'
- pip
- setuptools
- setuptools-scm
- wheel
build-packages:
- libffi-dev
- libturbojpeg0-dev
- libssl-dev
- libxslt1-dev
- libpq-dev
- zlib1g-dev
stage-packages:
- libasn1-8-heimdal
- libgssapi3-heimdal
- libhcrypto4-heimdal
- libheimbase1-heimdal
- libheimntlm0-heimdal
- libhx509-5-heimdal
- libkrb5-26-heimdal
- libldap-2.4-2
- libpq5
- libsasl2-2

View File

@@ -1,3 +0,0 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).

View File

@@ -1,18 +0,0 @@
# Setup Synapse with Systemd
This is a setup for managing synapse with a user-contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to your installation, in line with the
[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).
(A systemd drop-in override, sketched after the setup steps below, is one way
to apply these changes without editing the unit file itself.)
## Setup
1. Under the service section, ensure the `User` variable matches which user
you installed synapse under and wish to run it as.
2. Under the service section, ensure the `WorkingDirectory` variable matches
where you have installed synapse.
3. Under the service section, ensure the `ExecStart` variable points to the
appropriate locations of your installation.
4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
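Rather than editing the installed unit file in place, a systemd drop-in override is a common way to carry out steps 1-3. A minimal sketch, assuming the same virtualenv layout as the example unit file (user `synapse`, install under `/home/synapse/synapse`):

```sh
# Create a drop-in that systemd merges with matrix-synapse.service.
sudo mkdir -p /etc/systemd/system/matrix-synapse.service.d
sudo tee /etc/systemd/system/matrix-synapse.service.d/override.conf << 'EOF'
[Service]
User=synapse
WorkingDirectory=/home/synapse/synapse
# ExecStart must be cleared before it can be replaced in a drop-in.
ExecStart=
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
EOF
sudo systemctl daemon-reload
```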

View File

@@ -1,25 +0,0 @@
version: 1
# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
journal_fmt:
format: '%(name)s: [%(request)s] %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
journal:
class: systemd.journal.JournalHandler
formatter: journal_fmt
filters: [context]
SYSLOG_IDENTIFIER: synapse
root:
level: INFO
handlers: [journal]
disable_existing_loggers: False

View File

@@ -1,39 +0,0 @@
# Example systemd configuration file for synapse. Copy into
# /etc/systemd/system/, update the paths if necessary, then:
#
# systemctl enable matrix-synapse
# systemctl start matrix-synapse
#
# This assumes that Synapse has been installed by a user named
# synapse.
#
# This assumes that Synapse has been installed in a virtualenv in
# the user's home directory: `/home/synapse/synapse/env`.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
[Unit]
Description=Synapse Matrix homeserver
# If you are using postgresql to persist data, uncomment this line to make sure
# synapse starts after the postgresql service.
# After=postgresql.service
[Service]
Type=notify
NotifyAccess=main
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-abort
User=synapse
Group=nogroup
WorkingDirectory=/home/synapse/synapse
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
SyslogIdentifier=matrix-synapse
# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0
[Install]
WantedBy=multi-user.target

View File

@@ -1,71 +0,0 @@
[Service]
# The following directives give the synapse service R/W access to:
# - /run/matrix-synapse
# - /var/lib/matrix-synapse
# - /var/log/matrix-synapse
RuntimeDirectory=matrix-synapse
StateDirectory=matrix-synapse
LogsDirectory=matrix-synapse
######################
## Security Sandbox ##
######################
# Make sure that the service has its own unshared tmpfs at /tmp and that it
# cannot see or change any real devices
PrivateTmp=true
PrivateDevices=true
# We give no capabilities to a service by default
CapabilityBoundingSet=
AmbientCapabilities=
# Protect the following from modification:
# - The entire filesystem
# - sysctl settings and loaded kernel modules
# - No modifications allowed to Control Groups
# - Hostname
# - System Clock
ProtectSystem=strict
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectClock=true
ProtectHostname=true
# Prevent access to the following:
# - /home directory
# - Kernel logs
ProtectHome=tmpfs
ProtectKernelLogs=true
# Make sure that the process can only see PIDs and process details of itself,
# and the second option disables seeing details of things like system load and
# I/O etc
ProtectProc=invisible
ProcSubset=pid
# While not needed, we set these options explicitly
# - This process has been given access to the host network
# - It can also communicate with any IP Address
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
IPAddressAllow=any
# Restrict system calls to a sane bunch
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources @obsolete
# Misc restrictions
# - Since the process is a python process it needs to be able to write and
# execute memory regions, so we set MemoryDenyWriteExecute to false
RestrictSUIDSGID=true
RemoveIPC=true
NoNewPrivileges=true
RestrictRealtime=true
RestrictNamespaces=true
LockPersonality=true
PrivateUsers=true
MemoryDenyWriteExecute=false

View File

@@ -1,2 +0,0 @@
vucbot.yaml
vertobot.yaml

View File

@@ -1,309 +0,0 @@
#!/usr/bin/env perl
use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::Matrix 0.11_002;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";
my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
# https://rt.cpan.org/Ticket/Display.html?id=93107
ref $loop eq "IO::Async::Loop::Poll" and
warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
GetOptions(
'C|config=s' => \my $CONFIG,
'eval-from=s' => \my $EVAL_FROM,
) or exit 1;
if( defined $EVAL_FROM ) {
# An emergency 'eval() this file' hack
$SIG{HUP} = sub {
my $code = do {
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
local $/; <$fh>
};
eval $code or warn "Cannot eval() - $@";
};
}
defined $CONFIG or die "Must supply --config\n";
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
# Track every Room object, so we can ->leave them all on shutdown
my %bot_matrix_rooms;
my $bridgestate = {};
my $roomid_by_callid = {};
my $bot_verto = Net::Async::WebSocket::Client->new(
on_frame => sub {
my ( $self, $frame ) = @_;
warn "[Verto] receiving $frame";
on_verto_json($frame);
},
);
$loop->add( $bot_verto );
my $sessid = lc new Data::UUID->create_str();
my $bot_matrix = Net::Async::Matrix->new(
%MATRIX_CONFIG,
on_log => sub { warn "log: @_\n" },
on_invite => sub {
my ($matrix, $invite) = @_;
warn "[Matrix] invited to: " . $invite->{room_id} . " by " . $invite->{inviter} . "\n";
$matrix->join_room( $invite->{room_id} )->get;
},
on_room_new => sub {
my ($matrix, $room) = @_;
warn "[Matrix] have a room ID: " . $room->room_id . "\n";
$bot_matrix_rooms{$room->room_id} = $room;
# log in to verto on behalf of this room
$bridgestate->{$room->room_id}->{sessid} = $sessid;
$room->configure(
on_message => \&on_room_message,
);
my $f = send_verto_json_request("login", {
'login' => $CONFIG{'verto-dialog-params'}{'login'},
'passwd' => $CONFIG{'verto-config'}{'passwd'},
'sessid' => $sessid,
});
$matrix->adopt_future($f);
# we deliberately don't paginate the room, as we only care about
# new calls
},
on_unknown_event => \&on_unknown_event,
on_error => sub {
print STDERR "Matrix failure: @_\n";
},
);
$loop->add( $bot_matrix );
sub on_unknown_event
{
my ($matrix, $event) = @_;
print Dumper($event);
my $room_id = $event->{room_id};
my %dp = %{$CONFIG{'verto-dialog-params'}};
$dp{callID} = $bridgestate->{$room_id}->{callid};
if ($event->{type} eq 'm.call.invite') {
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
$bridgestate->{$room_id}->{gathered_candidates} = 0;
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
# no trickle ICE in verto apparently
}
elsif ($event->{type} eq 'm.call.candidates') {
# XXX: compare call IDs
if (!$bridgestate->{$room_id}->{gathered_candidates}) {
$bridgestate->{$room_id}->{gathered_candidates} = 1;
my $offer = $bridgestate->{$room_id}->{offer};
my $candidate_block = {
audio => '',
video => '',
};
foreach (@{$event->{content}->{candidates}}) {
if ($_->{sdpMid}) {
$candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
}
else {
$candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
$candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
}
}
# XXX: assumes audio comes first
#$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
#$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;
$offer =~ s/(m=video)/$candidate_block->{audio}$1/;
$offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
my $f = send_verto_json_request("verto.invite", {
"sdp" => $offer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$matrix->adopt_future($f);
}
else {
# ignore them, as no trickle ICE, although we might as well
# batch them up
# foreach (@{$event->{content}->{candidates}}) {
# push @{$bridgestate->{$room_id}->{candidates}}, $_;
# }
}
}
elsif ($event->{type} eq 'm.call.hangup') {
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
my $f = send_verto_json_request("verto.bye", {
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$matrix->adopt_future($f);
}
else {
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
}
}
else {
warn "Unhandled event: $event->{type}";
}
}
sub on_room_message
{
my ($room, $from, $content) = @_;
my $room_id = $room->room_id;
warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
}
Future->needs_all(
$bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
$bot_matrix->start;
}),
$bot_verto->connect(
%{ $CONFIG{"verto-bot"} },
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
)->on_done( sub {
warn("[Verto] connected to websocket");
}),
)->get;
$loop->attach_signal(
PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
INT => sub { $loop->stop },
);
$loop->attach_signal(
TERM => sub { $loop->stop },
);
eval {
$loop->run;
} or my $e = $@;
# When the bot gets shut down, have it leave the rooms so it's clear to observers
# that it is no longer running.
# if( $CONFIG{"leave-on-shutdown"} // 1 ) {
# print STDERR "Removing bot from Matrix rooms...\n";
# Future->wait_all( map { $_->leave->else_done() } values %bot_matrix_rooms )->get;
# }
# else {
# print STDERR "Leaving bot users in Matrix rooms.\n";
# }
die $e if $e;
exit 0;
{
my $json_id;
my $requests;
sub send_verto_json_request
{
$json_id ||= 1;
my ($method, $params) = @_;
my $json = {
jsonrpc => "2.0",
method => $method,
params => $params,
id => $json_id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
my $request = $loop->new_future;
$requests->{$json_id} = $request;
$json_id++;
return $request;
}
sub send_verto_json_response
{
my ($result, $id) = @_;
my $json = {
jsonrpc => "2.0",
result => $result,
id => $id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
}
sub on_verto_json
{
my $json = JSON->new->decode( $_[0] );
if ($json->{method}) {
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
$json->{method} eq 'verto.media') {
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
my $room = $bot_matrix_rooms{$room_id};
if ($json->{params}->{sdp}) {
# HACK HACK HACK HACK
$room->_do_POST_json( "/send/m.call.answer", {
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "answer",
},
})->then( sub {
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
}
else {
warn ("[Verto] unhandled method: " . $json->{method});
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
}
}
elsif ($json->{result}) {
$requests->{$json->{id}}->done($json->{result});
}
elsif ($json->{error}) {
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
}
}
}

View File

@@ -1,493 +0,0 @@
#!/usr/bin/env perl
use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);
binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";
my $msisdn_to_matrix = {
'447417892400' => '@matthew:matrix.org',
};
my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
$matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}
my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
# https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
# warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
GetOptions(
'C|config=s' => \my $CONFIG,
'eval-from=s' => \my $EVAL_FROM,
) or exit 1;
if( defined $EVAL_FROM ) {
# An emergency 'eval() this file' hack
$SIG{HUP} = sub {
my $code = do {
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
local $/; <$fh>
};
eval $code or warn "Cannot eval() - $@";
};
}
defined $CONFIG or die "Must supply --config\n";
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
my $bridgestate = {};
my $roomid_by_callid = {};
my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};
my $http = Net::Async::HTTP->new();
$loop->add( $http );
sub create_virtual_user
{
my ($localpart) = @_;
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/register?".
"access_token=$as_token&user_id=$localpart"
),
content_type => "application/json",
content => <<EOT
{
"type": "m.login.application_service",
"user": "$localpart"
}
EOT
)->get;
warn $response->as_string if ($response->code != 200);
}
my $http_server = Net::Async::HTTP::Server->new(
on_request => sub {
my $self = shift;
my ( $req ) = @_;
my $response;
my $path = uri_decode($req->path);
warn("request: $path");
if ($path =~ m#/users/\@(\+.*)#) {
# when queried about virtual users, auto-create them in the HS
my $localpart = $1;
create_virtual_user($localpart);
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
elsif ($path =~ m#/transactions/(.*)#) {
my $event = JSON->new->decode($req->body);
print Dumper($event);
my $room_id = $event->{room_id};
my %dp = %{$CONFIG{'verto-dialog-params'}};
$dp{callID} = $bridgestate->{$room_id}->{callid};
if ($event->{type} eq 'm.room.membership') {
my $membership = $event->{content}->{membership};
my $state_key = $event->{state_key};
my $room_id = $event->{state_id};
if ($membership eq 'invite') {
# autojoin invites
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/rooms/$room_id/join?".
"access_token=$as_token&user_id=$state_key"
),
content_type => "application/json",
content => "{}",
)->get;
warn $response->as_string if ($response->code != 200);
}
}
elsif ($event->{type} eq 'm.call.invite') {
my $room_id = $event->{room_id};
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{sessid} = $sessid;
# $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
my $offer = $event->{content}->{offer}->{sdp};
# $bridgestate->{$room_id}->{gathered_candidates} = 0;
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
# no trickle ICE in verto apparently
my $f = send_verto_json_request("verto.invite", {
"sdp" => $offer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
# elsif ($event->{type} eq 'm.call.candidates') {
# # XXX: this could fire for both matrix->verto and verto->matrix calls
# # and races as it collects candidates. much better to just turn off
# # candidate gathering in the webclient entirely for now
#
# my $room_id = $event->{room_id};
# # XXX: compare call IDs
# if (!$bridgestate->{$room_id}->{gathered_candidates}) {
# $bridgestate->{$room_id}->{gathered_candidates} = 1;
# my $offer = $bridgestate->{$room_id}->{offer};
# my $candidate_block = "";
# foreach (@{$event->{content}->{candidates}}) {
# $candidate_block .= "a=" . $_->{candidate} . "\r\n";
# }
# # XXX: collate using the right m= line - for now assume audio call
# $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
#
# my $f = send_verto_json_request("verto.invite", {
# "sdp" => $offer,
# "dialogParams" => \%dp,
# "sessid" => $bridgestate->{$room_id}->{sessid},
# });
# $self->adopt_future($f);
# }
# else {
# # ignore them, as no trickle ICE, although we might as well
# # batch them up
# # foreach (@{$event->{content}->{candidates}}) {
# # push @{$bridgestate->{$room_id}->{candidates}}, $_;
# # }
# }
# }
elsif ($event->{type} eq 'm.call.answer') {
# grab the answer and relay it to verto as a verto.answer
my $room_id = $event->{room_id};
my $answer = $event->{content}->{answer}->{sdp};
my $f = send_verto_json_request("verto.answer", {
"sdp" => $answer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
elsif ($event->{type} eq 'm.call.hangup') {
my $room_id = $event->{room_id};
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
my $f = send_verto_json_request("verto.bye", {
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
else {
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
}
}
else {
warn "Unhandled event: $event->{type}";
}
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
else {
warn "Unhandled path: $path";
$response = HTTP::Response->new( 404 );
}
$req->respond( $response );
},
);
$loop->add( $http_server );
$http_server->listen(
addr => { family => "inet", socktype => "stream", port => 8009 },
on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);
my $bot_verto = Net::Async::WebSocket::Client->new(
on_frame => sub {
my ( $self, $frame ) = @_;
warn "[Verto] receiving $frame";
on_verto_json($frame);
},
);
$loop->add( $bot_verto );
my $verto_connecting = $loop->new_future;
$bot_verto->connect(
%{ $CONFIG{"verto-bot"} },
on_connected => sub {
warn("[Verto] connected to websocket");
if (not $verto_connecting->is_done) {
$verto_connecting->done($bot_verto);
send_verto_json_request("login", {
'login' => $CONFIG{'verto-dialog-params'}{'login'},
'passwd' => $CONFIG{'verto-config'}{'passwd'},
'sessid' => $sessid,
});
}
},
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);
# die Dumper($verto_connecting);
my $as_url = $CONFIG{"matrix-bot"}->{as_url};
Future->needs_all(
$http->do_request(
method => "POST",
uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
content_type => "application/json",
content => <<EOT
{
"as_token": "$as_token",
"url": "$as_url",
"namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
}
EOT
)->then( sub{
my ($response) = (@_);
warn $response->as_string if ($response->code != 200);
return Future->done;
}),
$verto_connecting,
)->get;
$loop->attach_signal(
PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
INT => sub { $loop->stop },
);
$loop->attach_signal(
TERM => sub { $loop->stop },
);
eval {
$loop->run;
} or my $e = $@;
die $e if $e;
exit 0;
{
my $json_id;
my $requests;
sub send_verto_json_request
{
$json_id ||= 1;
my ($method, $params) = @_;
my $json = {
jsonrpc => "2.0",
method => $method,
params => $params,
id => $json_id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
my $request = $loop->new_future;
$requests->{$json_id} = $request;
$json_id++;
return $request;
}
sub send_verto_json_response
{
my ($result, $id) = @_;
my $json = {
jsonrpc => "2.0",
result => $result,
id => $id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
}
sub on_verto_json
{
my $json = JSON->new->decode( $_[0] );
if ($json->{method}) {
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
$json->{method} eq 'verto.media') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
if ($json->{params}->{sdp}) {
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.answer?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "answer",
},
}),
)->then( sub {
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
}
elsif ($json->{method} eq 'verto.invite') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
my $room_id;
# create a virtual user for the caller if needed.
create_virtual_user($caller);
# create a room of form #peer-peer and invite the callee
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/createRoom?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
room_alias_name => $alias,
invite => [ $callee_user ],
}),
)->then( sub {
my ( $response ) = @_;
my $resp = JSON->new->decode($response->content);
$room_id = $resp->{room_id};
$roomid_by_callid->{$json->{params}->{callID}} = $room_id;
})->get;
# join it
my ($response) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/join/$room_id?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => '{}',
)->get;
$bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
$bridgestate->{$room_id}->{sessid} = $sessid;
# put the m.call.invite in there
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.invite?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "offer",
},
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
elsif ($json->{method} eq 'verto.bye') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
# put the m.call.hangup into the room
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.hangup?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
else {
warn ("[Verto] unhandled method: " . $json->{method});
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
}
}
elsif ($json->{result}) {
$requests->{$json->{id}}->done($json->{result});
}
elsif ($json->{error}) {
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
}
}
}

View File

@@ -1,32 +0,0 @@
# Generic Matrix connection params
matrix:
server: 'matrix.org'
SSL: 1
# Bot-user connection details
matrix-bot:
user_id: '@vertobot:matrix.org'
password: ''
domain: 'matrix.org"
as_url: 'http://localhost:8009'
as_token: 'vertobot123'
verto-bot:
host: webrtc.freeswitch.org
service: 8081
url: "ws://webrtc.freeswitch.org:8081/"
verto-config:
passwd: 1234
verto-dialog-params:
useVideo: false
useStereo: false
tag: "webcam"
login: "1008@webrtc.freeswitch.org"
destination_number: "9664"
caller_id_name: "FreeSWITCH User"
caller_id_number: "1008"
callID: ""
remote_caller_id_name: "Outbound Call"
remote_caller_id_number: "9664"

View File

@@ -1,14 +0,0 @@
requires 'parent', 0;
requires 'Future', '>= 0.29';
requires 'Net::Async::Matrix', '>= 0.11_002';
requires 'Net::Async::Matrix::Utils';
requires 'Net::Async::WebSocket::Protocol', 0;
requires 'Data::UUID', 0;
requires 'IO::Async', '>= 0.63';
requires 'IO::Async::SSL', 0;
requires 'IO::Socket::SSL', 0;
requires 'YAML', 0;
requires 'JSON', 0;
requires 'Getopt::Long', 0;

View File

@@ -1,207 +0,0 @@
# JSON is shown in *reverse* chronological order.
# Send v. Receive is implicit.
{
"jsonrpc": "2.0",
"id": 7,
"result": {
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"message": "CALL ENDED",
"causeCode": 16,
"cause": "NORMAL_CLEARING",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
}
}
{
"jsonrpc": "2.0",
"method": "verto.bye",
"params": {
"dialogParams": {
"useVideo": false,
"useStereo": true,
"tag": "webcam",
"login": "1008@webrtc.freeswitch.org",
"destination_number": "9664",
"caller_id_name": "FreeSWITCH User",
"caller_id_number": "1008",
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"remote_caller_id_name": "Outbound Call",
"remote_caller_id_number": "9664"
},
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 7
}
{
"jsonrpc": "2.0",
"id": 6,
"result": {
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"action": "toggleHold",
"holdState": "active",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
}
}
{
"jsonrpc": "2.0",
"method": "verto.modify",
"params": {
"action": "toggleHold",
"dialogParams": {
"useVideo": false,
"useStereo": true,
"tag": "webcam",
"login": "1008@webrtc.freeswitch.org",
"destination_number": "9664",
"caller_id_name": "FreeSWITCH User",
"caller_id_number": "1008",
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"remote_caller_id_name": "Outbound Call",
"remote_caller_id_number": "9664"
},
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 6
}
{
"jsonrpc": "2.0",
"id": 5,
"result": {
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"action": "toggleHold",
"holdState": "held",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
}
}
{
"jsonrpc": "2.0",
"method": "verto.modify",
"params": {
"action": "toggleHold",
"dialogParams": {
"useVideo": false,
"useStereo": true,
"tag": "webcam",
"login": "1008@webrtc.freeswitch.org",
"destination_number": "9664",
"caller_id_name": "FreeSWITCH User",
"caller_id_number": "1008",
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"remote_caller_id_name": "Outbound Call",
"remote_caller_id_number": "9664"
},
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 5
}
{
"jsonrpc": "2.0",
"id": 349819,
"result": {
"method": "verto.answer"
}
}
{
"jsonrpc": "2.0",
"id": 349819,
"method": "verto.answer",
"params": {
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"sdp": "v=0\no=FreeSWITCH 1417101432 1417101433 IN IP4 209.105.235.10\ns=FreeSWITCH\nc=IN IP4 209.105.235.10\nt=0 0\na=msid-semantic: WMS jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\nm=audio 30134 RTP/SAVPF 111 126\na=rtpmap:111 opus/48000/2\na=fmtp:111 minptime=10; stereo=1\na=rtpmap:126 telephone-event/8000\na=silenceSupp:off - - - -\na=ptime:20\na=sendrecv\na=fingerprint:sha-256 F8:72:18:E9:72:89:99:22:5B:F8:B6:C6:C6:0D:C5:9B:B2:FB:BC:CA:8D:AB:13:8A:66:E1:37:38:A0:16:AA:41\na=rtcp-mux\na=rtcp:30134 IN IP4 209.105.235.10\na=ssrc:210967934 cname:rOIEajpw4FocakWY\na=ssrc:210967934 msid:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq a0\na=ssrc:210967934 mslabel:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\na=ssrc:210967934 label:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vqa0\na=ice-ufrag:OKwTmGLapwmxn7OF\na=ice-pwd:MmaMwq8rVmtWxfLbQ7U2Ew3T\na=candidate:2372654928 1 udp 659136 209.105.235.10 30134 typ host generation 0\n"
}
}
{
"jsonrpc": "2.0",
"id": 4,
"result": {
"message": "CALL CREATED",
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
}
}
{
"jsonrpc": "2.0",
"method": "verto.invite",
"params": {
"sdp": "v=0\r\no=- 1381685806032722557 2 IN IP4 127.0.0.1\r\ns=-\r\nt=0 0\r\na=group:BUNDLE audio\r\na=msid-semantic: WMS 6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\nm=audio 63088 RTP/SAVPF 111 103 104 0 8 106 105 13 126\r\nc=IN IP4 81.138.8.249\r\na=rtcp:63088 IN IP4 81.138.8.249\r\na=candidate:460398169 1 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:460398169 2 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:3460887983 1 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:3460887983 2 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:945327227 1 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:945327227 2 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:1441981097 1 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:1441981097 2 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:2160789855 1 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=candidate:2160789855 2 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=ice-ufrag:cP4qeRhn0LpcpA88\r\na=ice-pwd:fREmgSkXsDLGUUH1bwfrBQhW\r\na=ice-options:google-ice\r\na=fingerprint:sha-256 AF:35:64:1B:62:8A:EF:27:AE:2B:88:2E:FE:78:29:0B:08:DA:64:6C:DE:02:57:E3:EE:B1:D7:86:B8:36:8F:B0\r\na=setup:actpass\r\na=mid:audio\r\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\na=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\na=sendrecv\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=fmtp:111 minptime=10; stereo=1\r\na=rtpmap:103 ISAC/16000\r\na=rtpmap:104 ISAC/32000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:106 CN/32000\r\na=rtpmap:105 CN/16000\r\na=rtpmap:13 CN/8000\r\na=rtpmap:126 telephone-event/8000\r\na=maxptime:60\r\na=ssrc:558827154 cname:vdKHBNqa17t2gmE3\r\na=ssrc:558827154 msid:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\na=ssrc:558827154 mslabel:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\na=ssrc:558827154 label:bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\n",
"dialogParams": {
"useVideo": false,
"useStereo": true,
"tag": "webcam",
"login": "1008@webrtc.freeswitch.org",
"destination_number": "9664",
"caller_id_name": "FreeSWITCH User",
"caller_id_number": "1008",
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
"remote_caller_id_name": "Outbound Call",
"remote_caller_id_number": "9664"
},
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 4
}
{
"jsonrpc": "2.0",
"id": 3,
"result": {
"message": "logged in",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
}
}
{
"jsonrpc": "2.0",
"id": 1,
"error": {
"code": -32000,
"message": "Authentication Required"
}
}
{
"jsonrpc": "2.0",
"method": "login",
"params": {
"login": "1008@webrtc.freeswitch.org",
"passwd": "1234",
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 3
}
{
"jsonrpc": "2.0",
"id": 2,
"error": {
"code": -32000,
"message": "Authentication Required"
}
}
{
"jsonrpc": "2.0",
"method": "login",
"params": {
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 1
}
{
"jsonrpc": "2.0",
"method": "login",
"params": {
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
},
"id": 2
}

View File

@@ -1,33 +0,0 @@
# Creating multiple generic workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:
```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 808$i
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```
This would create five generic workers, each with a unique `worker_name` field in its file and listening on ports 8081-8085.
Customise the script to your needs.
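Each worker then needs to be started with both the main configuration file and its own file. A minimal sketch of launching one worker by hand (file names and paths are examples; in practice you would normally wrap this in systemd units as described in the worker documentation):

```sh
# Generate the worker config files in the current directory.
bash create_generic_workers.sh   # example name for the script above

# Start one of the workers; repeat for generic_worker2..5.
python -m synapse.app.generic_worker \
    --config-path /etc/matrix-synapse/homeserver.yaml \
    --config-path generic_worker1.yaml
```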

View File

@@ -1,145 +0,0 @@
# Creating multiple stream writers with a bash script
This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers.
Stream writers require both replication and HTTP listeners.
It also prints out the example lines to add to the Synapse main configuration file.
Remember to route the necessary endpoints directly to the worker associated with each stream.
If you run the script as-is, it will create workers with replication listeners starting from port 8034 and regular HTTP listeners starting from port 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
# Start with these replication and http ports.
# The script loop starts with the exact port and then increments it by one.
REP_START_PORT=8034
HTTP_START_PORT=8044
# Stream writer workers to generate. Feel free to add or remove them as you wish.
# Event persister ("events") isn't included here as it does not require its
# own HTTP listener.
STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" )
NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]})
i=0
while [ $i -lt "$NUM_WRITERS" ]
do
cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml
worker_app: synapse.app.generic_worker
worker_name: ${STREAM_WRITERS[$i]}_stream_writer
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: $(expr $REP_START_PORT + $i)
resources:
- names: [replication]
- type: http
port: $(expr $HTTP_START_PORT + $i)
resources:
- names: [client]
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
host: 127.0.0.1
port: $(expr $REP_START_PORT + $i)
"
HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer
"
((i++))
done
cat << EXAMPLECONFIG
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information.
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
$HOMESERVER_YAML_INSTANCE_MAP
stream_writers:
$HOMESERVER_YAML_STREAM_WRITERS
EXAMPLECONFIG
```
Copy the code above and save it to a file called ```create_stream_writers.sh``` (for example).
Make the script executable by running ```chmod +x create_stream_writers.sh```.
## Run the script to create workers and print out a sample configuration
Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```.
```console
$ ./create_stream_writers.sh
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
presence_stream_writer:
host: 127.0.0.1
port: 8034
typing_stream_writer:
host: 127.0.0.1
port: 8035
receipts_stream_writer:
host: 127.0.0.1
port: 8036
to_device_stream_writer:
host: 127.0.0.1
port: 8037
account_data_stream_writer:
host: 127.0.0.1
port: 8038
stream_writers:
presence: presence_stream_writer
typing: typing_stream_writer
receipts: receipts_stream_writer
to_device: to_device_stream_writer
account_data: account_data_stream_writer
```
Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file.
## Write directly to Synapse configuration file
You could also write the output directly to the homeserver's main configuration file. **This, however, is not recommended**, as even a small typo (such as replacing ```>>``` with ```>```) can erase the entire ```homeserver.yaml```. A safer two-step approach is sketched after the example below.
If you do this, back up your original configuration file first:
```console
# Back up homeserver.yaml first
cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak
# Create workers and write output to your homeserver.yaml
./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml
```
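A more cautious alternative (a sketch; the intermediate file name is illustrative) is to write the script's output to a separate file, review it, and only then append the reviewed snippet to ```homeserver.yaml```:
```console
# Write the generated snippet to its own file and review it
./create_stream_writers.sh > stream_writers_snippet.yaml
less stream_writers_snippet.yaml
# Back up homeserver.yaml, then append the reviewed snippet
cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak
cat stream_writers_snippet.yaml >> /etc/matrix-synapse/homeserver.yaml
```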
database-prepare-for-0.0.1.sh Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
# This will prepare a synapse database for running with v0.0.1 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.
set -e
cp "$1" "$1.bak"
DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)
rm "$1"
sqlite3 "$1" <<< "$DUMP"
debian/.gitignore vendored
@@ -1,7 +0,0 @@
/matrix-synapse-py3.*.debhelper
/matrix-synapse-py3.debhelper.log
/matrix-synapse-py3.substvars
/matrix-synapse-*/
/files
/debhelper-build-stamp
/.debhelper
debian/NEWS vendored
@@ -1,32 +0,0 @@
matrix-synapse-py3 (0.34.0) stable; urgency=medium
matrix-synapse-py3 is intended as a drop-in replacement for the existing
matrix-synapse package. When the package is installed, matrix-synapse will be
automatically uninstalled. The replacement should be relatively seamless,
however, please note the following important differences to matrix-synapse:
* Most importantly, the matrix-synapse service now runs under Python 3 rather
than Python 2.7.
* Synapse is installed into its own virtualenv (in /opt/venvs/matrix-synapse)
instead of using the system python libraries. (This may mean that you can
remove a number of old dependencies with `apt autoremove`).
* If you have previously manually installed any custom python extensions
(such as matrix-synapse-rest-auth) into the system python directories, you
will need to reinstall them in the new virtualenv. Please consult the
documentation of the relevant extensions for further details.
matrix-synapse-py3 will take over responsibility for the existing
configuration files, including the matrix-synapse systemd service.
Beware, however, that `apt purge matrix-synapse` will *disable* the
matrix-synapse service (so that it will not be started on reboot), even
though that service is no longer being provided by the matrix-synapse
package. It can be re-enabled with `systemctl enable matrix-synapse`.
The matrix.org team will continue to provide Python 2 `matrix-synapse`
packages for the next couple of releases, to allow time for system
administrators to test the new packages.
-- Richard van der Hoff <richard@matrix.org> Wed, 19 Dec 2018 14:00:00 +0000
@@ -1,139 +0,0 @@
#!/bin/bash
#
# runs dh_virtualenv to build the virtualenv in the build directory,
# and then runs the trial tests against the installed synapse.
set -e
export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
# make sure that the virtualenv links to the specific version of python, by
# dereferencing the python3 symlink.
#
# Otherwise, if somebody tries to install (say) the stretch package on buster,
# they will get a confusing error about "No module named 'synapse'", because
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=$(readlink -e /usr/bin/python3)
# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic x64 instructions possible, so that compiling on a new Intel chip
# doesn't enable features unavailable on older chips or on AMD.
#
# TODO: add similar things for non-amd64, or figure out a more generic way to
# do this.
case $(dpkg-architecture -q DEB_HOST_ARCH) in
amd64)
export CFLAGS=-march=x86-64
;;
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \
--extras systemd \
-o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
--python "$SNAKE" \
--upgrade-pip \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
case "$DEB_BUILD_OPTIONS" in
*nocheck*)
# Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
;;
*)
# Copy tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=$(mktemp -d)
trap 'rm -r $tmpdir' EXIT
cp -r tests "$tmpdir"
# To avoid pulling in the unbuilt Synapse in the local directory
pushd /
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
popd
;;
esac
# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
# tweak the paths to the tls certs and signing keys
/^tls_.*_path:/ and s/SERVERNAME/homeserver/;
/^signing_key_path:/ and s/SERVERNAME/homeserver/;
# tweak the pid file location
/^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
# tweak the path to the log config
/^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
# tweak the path to the media store
/^media_store_path:/ and s#/media_store#/media#;
# remove the server_name setting, which is set in a separate file
/^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
# remove the report_stats setting, which is set in a separate file
/^# report_stats:/ and $_ = "";
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
PYPKG=$(basename "$SNAKE")
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
# add a couple of triggers. This is needed so that dh-virtualenv can rebuild
# the venv when the system python changes (see
# https://dh-virtualenv.readthedocs.io/en/latest/tutorial.html#step-2-set-up-packaging-for-your-project)
#
# we do it here rather than the more conventional way of just adding it to
# debian/matrix-synapse-py3.triggers, because we need to add a trigger on the
# right version of python.
cat >>"debian/.debhelper/generated/matrix-synapse-py3/triggers" <<EOF
# triggers for dh-virtualenv
interest-noawait $SNAKE
interest dh-virtualenv-interpreter-update
EOF
debian/changelog vendored
File diff suppressed because it is too large
debian/clean vendored
@@ -1 +0,0 @@
exported_requirements.txt
Some files were not shown because too many files have changed in this diff.