Compare commits
4 Commits
v1.63.1 ... paul/schem

| Author | SHA1 | Date |
|---|---|---|
|  | 01e83c9680 |  |
|  | 9705706a7f |  |
|  | 801a551da1 |  |
|  | b8906b0ea8 |  |
.ci/complement_package.gotpl
@@ -1,93 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub actions does not currently support nested groups so
we are creating a stylized header for each package.

This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Failing tests are first */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}

{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}

::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}


{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}

{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}

::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}


{{- /* Then passing tests are last */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}

{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}

::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}
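For context: `gotestfmt` (installed by `setup_complement_prerequisites.sh` below) reads `go test -json` output on stdin and renders it with templates like the one above; the Complement jobs in this diff pipe `complement.sh -json 2>&1 | gotestfmt`. A minimal local sketch — the `./...` package path is illustrative, and I'm assuming gotestfmt picks the template up from a `.gotestfmt/github/` directory in the working directory, as the setup script's `cp` suggests:

```bash
# Install the formatter (the setup script uses `go get -v ...@latest` for the same module).
go install github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

# Put the custom package template where the setup script puts it.
mkdir -p .gotestfmt/github
cp .ci/complement_package.gotpl .gotestfmt/github/package.gotpl

# Render JSON test output; ./... is a placeholder package path.
go test -json ./... 2>&1 | gotestfmt
```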
.ci/latest_deps_build_failed_issue_template.md
@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}
.ci/postgres-config.yaml
@@ -1,19 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the postgresql database that will be available in the
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"

signing_key_path: ".ci/test.signing.key"

report_stats: false

database:
  name: "psycopg2"
  args:
    user: postgres
    host: localhost
    password: postgres
    database: synapse

# Suppress the key server warning.
trusted_key_servers: []
.ci/scripts/checkout_complement.sh
@@ -1,25 +0,0 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.

set -e
mkdir -p complement

# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
#    for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
#    (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
  # Skip empty branch names and merge commits.
  if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
    continue
  fi

  (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
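A dry run of the same fallback loop with illustrative environment values shows the candidate order (the real script stops at the first tarball that downloads):

```bash
#!/bin/bash
# Illustrative values: GITHUB_HEAD_REF is only set on pull requests.
GITHUB_HEAD_REF="paul/schem"
GITHUB_BASE_REF="develop"
GITHUB_REF="refs/pull/1234/merge"

for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
  if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
    continue
  fi
  echo "would try: $BRANCH_NAME"
done
# Prints: paul/schem, develop, HEAD -- the refs/pull/ candidate is skipped.
```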
.ci/scripts/postgres_exec.py
@@ -1,31 +0,0 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import psycopg2

# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.

# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
    user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
    cur.execute(c)
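Each positional argument is executed as one SQL statement against the always-present `postgres` database; `test_synapse_port_db.sh` below uses it exactly like this:

```bash
poetry run .ci/scripts/postgres_exec.py \
    "DROP DATABASE synapse" \
    "CREATE DATABASE synapse"
```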
.ci/scripts/setup_complement_prerequisites.sh
@@ -1,36 +0,0 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu

alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock

block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock

block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock
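The `block`/`endblock` aliases above are just a way to wrap each step in GitHub Actions log groups while juggling `set -x` tracing. Expanded by hand, a block boils down to this (a sketch with a placeholder step):

```sh
{ set +x; } 2>/dev/null                           # quietly stop tracing
echo "::group::Install Complement Dependencies"   # GitHub's log viewer folds until ::endgroup::
set -x                                            # trace the step's real commands...
sudo apt-get -qq update
{ set +x; } 2>/dev/null
echo "::endgroup::"
set -x
```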
.ci/scripts/test_export_data_command.sh
@@ -1,52 +0,0 @@
#!/usr/bin/env bash

# Test for the export-data admin command against sqlite and postgres

# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.

set -xe
cd "$(dirname "$0")/../.."

echo "--- Generate the signing key"

# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

echo "--- Prepare test database"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data

# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
    echo "Command successful, this test passes"
else
    echo "No output directories found, the command fails against a sqlite database."
    exit 1
fi

# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2

# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
    echo "Command successful, this test passes"
else
    echo "No output directories found, the command fails against a postgres database."
    exit 1
fi
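To run this locally the same preconditions apply as in CI (a sketch; assumes a Postgres server matching `.ci/postgres-config.yaml` is reachable on localhost):

```bash
poetry install --extras postgres
.ci/scripts/test_export_data_command.sh
```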
.ci/scripts/test_old_deps.sh
@@ -1,83 +0,0 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.

# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive

set -ex

apt-get update
apt-get install -y \
        python3 python3-dev python3-pip python3-venv pipx \
        libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

export LANG="C.UTF-8"

# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1

# TODO: in the future, we could use an implementation of
#   https://github.com/python-poetry/poetry/issues/3527
#   https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.

# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
#   a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.

# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).

sed -i \
   -e "s/[~>]=/==/g" \
   -e '/^python = "^/!s/\^/==/g' \
   -e "/psycopg2/d" \
   -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
   -e '/systemd/d' \
   pyproject.toml

# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.

pip install --user toml

REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
    data = toml.loads(f.read())

del data['tool']['poetry']['dev-dependencies']

with open('pyproject.toml', 'w') as f:
    toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"

pipx install poetry==1.1.12
~/.local/bin/poetry lock

echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"

~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests
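The effect of the sed patch on representative lines (a sketch — the two dependency lines are illustrative, not quoted from Synapse's pyproject.toml):

```bash
printf '%s\n' 'attrs = ">=19.2.0"' 'canonicaljson = "^1.4.0"' 'python = "^3.7"' \
  | sed -e "s/[~>]=/==/g" -e '/^python = "^/!s/\^/==/g'
# attrs = "==19.2.0"
# canonicaljson = "==1.4.0"
# python = "^3.7"      <- the supported-Python bound is deliberately left alone
```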
.ci/scripts/test_synapse_port_db.sh
@@ -1,53 +0,0 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
#   - configures synapse and a postgres server.
#   - runs the port script on a prepopulated test sqlite db
#   - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.

set -xe
cd "$(dirname "$0")/../.."

echo "--- Generate the signing key"

# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

echo "--- Prepare test database"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

#####

# Now do the same again, on an empty database.

echo "--- Prepare empty SQLite database"

# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db

poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
    "DROP DATABASE synapse" \
    "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
.ci/sqlite-config.yaml
@@ -1,16 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"

signing_key_path: ".ci/test.signing.key"

report_stats: false

database:
  name: "sqlite3"
  args:
    database: ".ci/test_db.db"

# Suppress the key server warning.
trusted_key_servers: []
.ci/test_db.db
BIN
Binary file not shown.
.ci/twisted_trunk_build_failed_issue_template.md
@@ -1,4 +0,0 @@
---
title: CI run against Twisted trunk is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}
.ci/worker-blacklist
@@ -1,2 +0,0 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.
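The workflows below combine this file with the repository's main `sytest-blacklist` before worker-mode runs:

```bash
cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
```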
.codecov.yml
@@ -1,14 +0,0 @@
comment: off

coverage:
  status:
    project:
      default:
        target: 0  # Target % coverage, can be auto. Turned off for now
        threshold: null
        base: auto
    patch:
      default:
        target: 0
        threshold: null
        base: auto
.coveragerc
@@ -1,8 +0,0 @@
[run]
branch = True
parallel = True
include=$TOP/synapse/*
data_file = $TOP/.coverage

[report]
precision = 2
.dockerignore
@@ -1,11 +0,0 @@
# ignore everything by default
*

# things to include
!docker
!synapse
!README.rst
!pyproject.toml
!poetry.lock

**/__pycache__
.editorconfig
@@ -1,10 +0,0 @@
# EditorConfig https://EditorConfig.org

# top-most EditorConfig file
root = true

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88
.flake8
@@ -1,11 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
#  W503: line break before binary operator
#  W504: line break after binary operator
#  E203: whitespace before ':' (which is contrary to pep8?)
#  E731: do not assign a lambda expression, use a def
#  E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501
.git-blame-ignore-revs
@@ -1,11 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3

# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56

# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1

# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
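This file follows git's standard ignore-revs convention: point `git blame` at it and the listed bulk-reformatting commits stop showing up as last-touch authors. A usage sketch (the blamed path is illustrative):

```bash
git config blame.ignoreRevsFile .git-blame-ignore-revs
git blame synapse/handlers/message.py
```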
.github/CODEOWNERS
@@ -1,2 +0,0 @@
# Automatically request reviews from the synapse-core team when a pull request comes in.
* @matrix-org/synapse-core
.github/FUNDING.yml
@@ -1,4 +0,0 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg
.github/ISSUE_TEMPLATE.md
@@ -1,5 +0,0 @@
**If you are looking for support** please ask in **#synapse:matrix.org**
(using a matrix.org account if necessary). We do not use GitHub issues for
support.

**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
  - type: markdown
    attributes:
      value: |
        **THIS IS NOT A SUPPORT CHANNEL!**
        **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).

        If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/

        This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.

        You can also preview your report before submitting it.
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Describe the problem that you are experiencing
    validations:
      required: true
  - type: textarea
    id: reproduction_steps
    attributes:
      label: Steps to reproduce
      description: |
        Describe the series of steps that leads you to the problem.

        Describe how what happens differs from what you expected.
      placeholder: Tell us what you see!
      value: |
        - list the steps
        - that reproduce the bug
        - using hyphens as bullet points
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        ---

        **IMPORTANT**: please answer the following questions, to help us narrow down the problem.
  - type: input
    id: homeserver
    attributes:
      label: Homeserver
      description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: Synapse Version
      description: |
        What version of Synapse is this homeserver running?

        You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version

        or with this command:

        ```
        $ curl http://localhost:8008/_synapse/admin/v1/server_version
        ```

        (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
    validations:
      required: true
  - type: dropdown
    id: install_method
    attributes:
      label: Installation Method
      options:
        - Docker (matrixdotorg/synapse)
        - Debian packages from packages.matrix.org
        - pip (from PyPI)
        - Other (please mention below)
  - type: textarea
    id: platform
    attributes:
      label: Platform
      description: |
        Tell us about the environment in which your homeserver is operating...
        e.g. distro, hardware, if it's running in a vm/container, etc.
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
        This will be automatically formatted into code, so there is no need for backticks.

        Please be careful to remove any personal or private data.

        **Bug reports are usually very difficult to diagnose without logging.**
      render: shell
    validations:
      required: true
  - type: textarea
    id: anything_else
    attributes:
      label: Anything else that would be useful to know?
.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md
@@ -1,9 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Description:**

<!-- Describe here the feature you are requesting. -->
.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
@@ -1,10 +0,0 @@
---
name: Support request
about: I need support for Synapse

---

Please don't file github issues asking for support.

Instead, please join [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org)
(from a matrix.org account if necessary), and ask there.
.github/PULL_REQUEST_TEMPLATE.md
@@ -1,14 +0,0 @@
### Pull Request Checklist

<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->

* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
  - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
  - Use markdown where necessary, mostly for `code blocks`.
  - End with either a period (.) or an exclamation mark (!).
  - Start with a capital letter.
  - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
  (run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
.github/SUPPORT.md
@@ -1,3 +0,0 @@
[**#synapse:matrix.org**](https://matrix.to/#/#synapse:matrix.org) is the official support room for
Synapse, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html.
Please ask for support there, rather than filing github issues.
.github/workflows/docker.yml
@@ -1,57 +0,0 @@
# GitHub actions workflow which builds and publishes the docker images.

name: Build docker images

on:
  push:
    tags: ["v*"]
    branches: [ master, main, develop ]
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1

      - name: Inspect builder
        run: docker buildx inspect

      - name: Log in to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Calculate docker image tag
        id: set-tag
        uses: docker/metadata-action@master
        with:
          images: matrixdotorg/synapse
          flavor: |
            latest=false
          tags: |
            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
            type=pep440,pattern={{raw}}

      - name: Build and push all platforms
        uses: docker/build-push-action@v2
        with:
          push: true
          labels: "gitsha1=${{ github.sha }}"
          tags: "${{ steps.set-tag.outputs.tags }}"
          file: "docker/Dockerfile"
          platforms: linux/amd64,linux/arm64
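My reading of the metadata-action rules above, as a tag map (an interpretation, not output captured from a run):

```bash
# push to develop           -> matrixdotorg/synapse:develop
# push to master or main    -> matrixdotorg/synapse:latest
# push tag v1.63.1          -> matrixdotorg/synapse:v1.63.1   (type=pep440,pattern={{raw}})
# flavor latest=false stops tag pushes from also moving :latest
```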
.github/workflows/docs.yaml
@@ -1,65 +0,0 @@
name: Deploy the documentation

on:
  push:
    branches:
      # For bleeding-edge documentation
      - develop
      # For documentation specific to a release
      - 'release-v*'
      # stable docs
      - master

  workflow_dispatch:

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      # Figure out the target directory.
      #
      # The target directory depends on the name of the branch
      #
      - name: Get the target directory name
        id: vars
        run: |
          # first strip the 'refs/heads/' prefix with some shell foo
          branch="${GITHUB_REF#refs/heads/}"

          case $branch in
              release-*)
                  # strip 'release-' from the name for release branches.
                  branch="${branch#release-}"
                  ;;
              master)
                  # deploy to "latest" for the master branch.
                  branch="latest"
                  ;;
          esac

          # finally, set the 'branch-version' var.
          echo "::set-output name=branch-version::$branch"

      # Deploy to the target directory.
      - name: Deploy to gh pages
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
          destination_dir: ./${{ steps.vars.outputs.branch-version }}
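Tracing the `branch-version` computation for the three branch patterns this workflow triggers on (a runnable sketch reusing the same case statement):

```bash
for GITHUB_REF in refs/heads/develop refs/heads/release-v1.63 refs/heads/master; do
  branch="${GITHUB_REF#refs/heads/}"
  case $branch in
      release-*) branch="${branch#release-}" ;;
      master)    branch="latest" ;;
  esac
  echo "$GITHUB_REF -> ./$branch"
done
# refs/heads/develop       -> ./develop
# refs/heads/release-v1.63 -> ./v1.63
# refs/heads/master        -> ./latest
```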
.github/workflows/latest_deps.yml
@@ -1,159 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.

name: Latest dependencies

on:
  schedule:
    - cron: 0 7 * * *
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mypy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      # The dev dependencies aren't exposed in the wheel metadata (at least with current
      # poetry-core versions), so we install with poetry.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.2.0b1"
          extras: "all"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
      # Upgrade all runtime dependencies only. This is intended to mimic a fresh
      # `pip install matrix-synapse[all]` as closely as possible.
      - run: poetry update --no-dev
      - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
      - run: poetry run mypy
  trial:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - database: "sqlite"
          - database: "postgres"
            postgres-version: "14"

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
      - run: pip install .[all,test]
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: python -m twisted.trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true


  sytest:
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:testing
      volumes:
        - ${{ github.workspace }}:/src
    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: focal

          - sytest-tag: focal
            postgres: postgres
            workers: workers
            redis: redis
    env:
      POSTGRES: ${{ matrix.postgres && 1}}
      WORKERS: ${{ matrix.workers && 1 }}
      REDIS: ${{ matrix.redis && 1 }}
      BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

    steps:
      - uses: actions/checkout@v2
      - name: Ensure sytest runs `pip install`
        # Delete the lockfile so sytest will `pip install` rather than `poetry install`
        run: rm /src/poetry.lock
        working-directory: /src
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*


  # TODO: run complement (as with twisted trunk, see #12473).

  # open an issue if the build fails, so we know about it.
  open-issue:
    if: failure()
    needs:
      # TODO: should mypy be included here? It feels more brittle than the other two.
      - mypy
      - trial
      - sytest

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          update_existing: true
          filename: .ci/latest_deps_build_failed_issue_template.md

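To reproduce the trial job's database service locally, the two commands from the workflow work verbatim (assuming Docker and the postgres client tools are installed):

```bash
docker run -d -p 5432:5432 \
  -e POSTGRES_PASSWORD=postgres \
  -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
  postgres:14
until pg_isready -h localhost; do sleep 1; done
```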
.github/workflows/release-artifacts.yml
@@ -1,121 +0,0 @@
# GitHub actions workflow which builds the release artifacts.

name: Build release artifacts

on:
  # we build on PRs and develop to (hopefully) get early warning
  # of things breaking (but only build one set of debs)
  pull_request:
  push:
    branches: ["develop", "release-*"]

    # we do the full build on tags.
    tags: ["v*"]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: write

jobs:
  get-distros:
    name: "Calculate list of debian distros"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - id: set-distros
        run: |
          # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
          dists='["debian:sid"]'
          if [[ $GITHUB_REF == refs/tags/* ]]; then
              dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
          fi
          echo "::set-output name=distros::$dists"
    # map the step outputs to job outputs
    outputs:
      distros: ${{ steps.set-distros.outputs.distros }}

  # now build the packages with a matrix build.
  build-debs:
    needs: get-distros
    name: "Build .deb packages"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        distro: ${{ fromJson(needs.get-distros.outputs.distros) }}

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          path: src

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          install: true

      - name: Set up docker layer caching
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Set up python
        uses: actions/setup-python@v2

      - name: Build the packages
        # see https://github.com/docker/build-push-action/issues/252
        # for the cache magic here
        run: |
          ./src/scripts-dev/build_debian_packages.py \
            --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
            --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
            --docker-build-arg=--progress=plain \
            --docker-build-arg=--load \
            "${{ matrix.distro }}"
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

      - name: Upload debs as artifacts
        uses: actions/upload-artifact@v2
        with:
          name: debs
          path: debs/*

  build-sdist:
    name: "Build pypi distribution files"
    uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"

  # if it's a tag, create a release and attach the artifacts to it
  attach-assets:
    name: "Attach assets to release"
    if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
    needs:
      - build-debs
      - build-sdist
    runs-on: ubuntu-latest
    steps:
      - name: Download all workflow run artifacts
        uses: actions/download-artifact@v2
      - name: Build a tarball for the debs
        run: tar -cvJf debs.tar.xz debs
      - name: Attach to release
        uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          files: |
            Sdist/*
            Wheel/*
            debs.tar.xz
          # if it's not already published, keep the release as a draft.
          draft: true
          # mark it as a prerelease if the tag contains 'rc'.
          prerelease: ${{ contains(github.ref, 'rc') }}
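The `set-distros` step gates the expensive matrix: branch and PR builds package a single distro, while tag builds ask `build_debian_packages.py` for the full list. The shell logic in isolation (the `GITHUB_REF` value is illustrative):

```bash
GITHUB_REF="refs/heads/develop"   # a release build would see e.g. refs/tags/v1.63.1
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
    dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "$dists"   # branch/PR builds print ["debian:sid"]
```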
.github/workflows/tests.yml
@@ -1,371 +0,0 @@
name: Tests

on:
  push:
    branches: ["develop", "release-*"]
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-sampleconfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install .
      - run: scripts-dev/generate_sample_config.sh --check
      - run: scripts-dev/config-lint.sh

  check-schema-delta:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors

  lint:
    uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
    with:
      typechecking-extras: "all"

  lint-crlf:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Check line endings
        run: scripts-dev/check_line_terminators.sh

  lint-newsfile:
    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - uses: actions/setup-python@v2
      - run: "pip install 'towncrier>=18.6.0rc1'"
      - run: scripts-dev/check-newsfragment.sh
        env:
          PULL_REQUEST_NUMBER: ${{ github.event.number }}

  # Dummy step to gate other tests on without repeating the whole list
  linting-done:
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
    runs-on: ubuntu-latest
    steps:
      - run: "true"

  trial:
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.7", "3.8", "3.9", "3.10"]
        database: ["sqlite"]
        extras: ["all"]
        include:
          # Newest Python without optional deps
          - python-version: "3.10"
            extras: ""

          # Oldest Python with PostgreSQL
          - python-version: "3.7"
            database: "postgres"
            postgres-version: "10"
            extras: "all"

          # Newest Python with newest PostgreSQL
          - python-version: "3.10"
            database: "postgres"
            postgres-version: "14"
            extras: "all"

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: ${{ matrix.extras }}
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: poetry run trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-olddeps:
    # Note: sqlite only; no postgres
    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Test with old deps
        uses: docker://ubuntu:focal # For old python and sqlite
        # Note: focal seems to be using 3.8, but the oldest is 3.7?
        # See https://github.com/matrix-org/synapse/issues/12343
        with:
          workdir: /github/workspace
          entrypoint: .ci/scripts/test_old_deps.sh
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  trial-pypy:
    # Very slow; only run if the branch name includes 'pypy'
    # Note: sqlite only; no postgres. Completely untested since poetry move.
    if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["pypy-3.7"]
        extras: ["all"]

    steps:
      - uses: actions/checkout@v2
      # Install libs necessary for PyPy to build binary wheels for dependencies
      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: ${{ matrix.extras }}
      - run: poetry run trial --jobs=2 tests
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    if: ${{ !failure() && !cancelled() }}
    needs: linting-done
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
      volumes:
        - ${{ github.workspace }}:/src
    env:
      SYTEST_BRANCH: ${{ github.head_ref }}
      POSTGRES: ${{ matrix.postgres && 1}}
      MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
      WORKERS: ${{ matrix.workers && 1 }}
      REDIS: ${{ matrix.redis && 1 }}
      BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
      TOP: ${{ github.workspace }}

    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: focal

          - sytest-tag: focal
            postgres: postgres

          - sytest-tag: testing
            postgres: postgres

          - sytest-tag: focal
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: multi-postgres
            workers: workers

          - sytest-tag: buster
            postgres: postgres
            workers: workers
            redis: redis

    steps:
      - uses: actions/checkout@v2
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  export-data:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: [linting-done, portdb]
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}

    services:
      postgres:
        image: postgres
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: "postgres"
      - run: .ci/scripts/test_export_data_command.sh

  portdb:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}
    strategy:
      matrix:
        include:
          - python-version: "3.7"
            postgres-version: "10"

          - python-version: "3.10"
            postgres-version: "14"

    services:
      postgres:
        image: postgres:${{ matrix.postgres-version }}
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: "postgres"
      - run: .ci/scripts/test_synapse_port_db.sh

  complement:
    if: "${{ !failure() && !cancelled() }}"
    needs: linting-done
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
            database: Postgres

          - arrangement: workers
            database: Postgres

    steps:
      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      - run: |
          set -o pipefail
          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests

  # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
  tests-done:
    if: ${{ always() }}
    needs:
      - check-sampleconfig
      - lint
      - lint-crlf
      - lint-newsfile
      - trial
      - trial-olddeps
      - sytest
      - export-data
      - portdb
      - complement
    runs-on: ubuntu-latest
    steps:
      - uses: matrix-org/done-action@v2
        with:
          needs: ${{ toJSON(needs) }}

          # The newsfile lint may be skipped on non PR builds
          skippable:
            lint-newsfile
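To approximate a cell of the `trial` matrix locally (a sketch: assumes poetry is on the PATH and, for the postgres flavour, a server running as in the docker command above):

```bash
poetry install --extras all
poetry run trial --jobs=2 tests          # sqlite flavour

SYNAPSE_POSTGRES=1 \
SYNAPSE_POSTGRES_HOST=localhost \
SYNAPSE_POSTGRES_USER=postgres \
SYNAPSE_POSTGRES_PASSWORD=postgres \
poetry run trial --jobs=2 tests          # postgres flavour
```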
.github/workflows/twisted_trunk.yml
@@ -1,162 +0,0 @@
name: Twisted Trunk

on:
  schedule:
    - cron: 0 8 * * *

  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mypy:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          extras: "all"
      - run: |
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
      - run: poetry run mypy

  trial:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          extras: "all test"
      - run: |
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - run: poetry run trial --jobs 2 tests

      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:buster
      volumes:
        - ${{ github.workspace }}:/src

    steps:
      - uses: actions/checkout@v2
      - name: Patch dependencies
        # Note: The poetry commands want to create a virtualenv in /src/.venv/,
        #       but the sytest-synapse container expects it to be in /venv/.
        #       We symlink it before running poetry so that poetry actually
        #       ends up installing to `/venv`.
        run: |
          ln -s -T /venv /src/.venv
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
        working-directory: /src
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
        env:
          # Use offline mode to avoid reinstalling the pinned version of
          # twisted.
          OFFLINE: 1
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  complement:
    if: "${{ !failure() && !cancelled() }}"
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
            database: Postgres

          - arrangement: workers
            database: Postgres

    steps:
      - name: Run actions/checkout@v2 for synapse
        uses: actions/checkout@v2
        with:
          path: synapse

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      # This step is specific to the 'Twisted trunk' test run:
      - name: Patch dependencies
        run: |
          set -x
          DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
          pipx install poetry==1.1.12

          poetry remove -n twisted
          poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry lock --no-update
          # NOT IN 1.1.12 poetry lock --check
        working-directory: synapse

      - run: |
          set -o pipefail
          TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests

  # open an issue if the build fails, so we know about it.
  open-issue:
    if: failure()
    needs:
      - mypy
      - trial
      - sytest
      - complement

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          update_existing: true
          filename: .ci/twisted_trunk_build_failed_issue_template.md
74  .gitignore  (vendored)
@@ -1,62 +1,26 @@
# filename patterns
*~
*.pyc
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock
*.py[cod]
*.snap
*.tac

_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info

# We do want the poetry lockfile.
!poetry.lock
cmdclient_config.json
homeserver*.db

# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/
/uploads
.coverage
htmlcov

# For direnv users
/.envrc
demo/*.db
demo/*.log
demo/*.pid

# IDEs
/.idea/
/.ropeproject/
/.vscode/
graph/*.svg
graph/*.png
graph/*.dot

# build products
!/.coveragerc
/.coverage*
/.mypy_cache/
/.tox
/.tox-pg-container
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/

# docs
book/

# complement
/complement-*
/master.tar.gz
uploads

51  AUTHORS.rst
@@ -1,51 +0,0 @@
The following is an incomplete list of people outside the core team who have
contributed to Synapse. It is no longer maintained: more recent contributions
are listed in the `changelog <CHANGES.md>`_.

----

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

Brabo <brabo at riseup.net>
 * Installation instruction fixes

Ivan Shapovalov <intelfx100 at gmail.com>
 * contrib/systemd: a sample systemd unit file and a logger configuration

Eric Myhre <hash at exultant.us>
 * Fix bug where ``media_store_path`` config option was ignored by v0 content
   repository API.

Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
 * Add SAML2 support for registration and login.

Steven Hammerton <steven.hammerton at openmarket.com>
 * Add CAS support for registration and login.

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication

Pierre Jaury <pierre at jaury.eu>
 * Docker packaging

Serban Constantin <serban.constantin at gmail dot com>
 * Small bug fix

Joseph Weston <joseph at weston.cloud>
 * Add admin API for querying HS version

Benjamin Saunders <ben.e.saunders at gmail dot com>
 * Documentation improvements

Werner Sembach <werner.sembach at fau dot de>
 * Automatically remove a group/community when it is empty

1567  CHANGES.md
File diff suppressed because it is too large
85  CHANGES.rst  (new file)
@@ -0,0 +1,85 @@
Changes in synapse 0.2.0 (2014-09-02)
=====================================
This update changes many configuration options, updates the
database schema and mandates SSL for server-server connections.

Homeserver:
 * Require SSL for server-server connections.
 * Add SSL listener for client-server connections.
 * Add ability to use config files.
 * Add support for kicking/banning and power levels.
 * Allow setting of room names and topics on creation.
 * Change presence to include last seen time of the user.
 * Change url path prefix to /_matrix/...
 * Bug fixes to presence.

Webclient:
 * Reskin the CSS for registration and login.
 * Various improvements to rooms CSS.
 * Support changes in client-server API.
 * Bug fixes to VOIP UI.
 * Various bug fixes to handling of changes to room member list.

Changes in synapse 0.1.2 (2014-08-29)
=====================================

Webclient:
 * Add basic call state UI for VoIP calls.

Changes in synapse 0.1.1 (2014-08-29)
=====================================

Homeserver:
 * Fix bug that caused the event stream to not notify some clients about
   changes.

Changes in synapse 0.1.0 (2014-08-29)
=====================================
Presence has been reenabled in this release.

Homeserver:
 * Update client to server API, including:
   - Use a more consistent url scheme.
   - Provide more useful information in the initial sync api.
 * Change the presence handling to be much more efficient.
 * Change the presence server to server API to not require explicit polling of
   all users who share a room with a user.
 * Fix races in the event streaming logic.

Webclient:
 * Update to use new client to server API.
 * Add basic VOIP support.
 * Add idle timers that change your status to away.
 * Add recent rooms column when viewing a room.
 * Various network efficiency improvements.
 * Add basic mobile browser support.
 * Add a settings page.

Changes in synapse 0.0.1 (2014-08-22)
=====================================
Presence has been disabled in this release due to a bug that caused the
homeserver to spam other remote homeservers.

Homeserver:
 * Completely change the database schema to support generic event types.
 * Improve presence reliability.
 * Improve reliability of joining remote rooms.
 * Fix bug where room join events were duplicated.
 * Improve initial sync API to return more information to the client.
 * Stop generating fake messages for room membership events.

Webclient:
 * Add tab completion of names.
 * Add ability to upload and send images.
 * Add profile pages.
 * Improve CSS layout of room.
 * Disambiguate identical display names.
 * Don't get remote users display names and avatars individually.
 * Use the new initial sync API to reduce number of round trips to the homeserver.
 * Change url scheme to use room aliases instead of room ids where known.
 * Increase longpoll timeout.

Changes in synapse 0.0.0 (2014-08-13)
=====================================

 * Initial alpha release

@@ -1,3 +0,0 @@
# Welcome to Synapse

Please see the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html) in our rendered documentation.
@@ -1,7 +0,0 @@
# Installation Instructions

This document has moved to the
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
Please update your links.

The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).

3  MANIFEST.in  (new file)
@@ -0,0 +1,3 @@
recursive-include docs *
recursive-include tests *.py
recursive-include synapse/persistence/schema *.sql

35  MAP.rst  (new file)
@@ -0,0 +1,35 @@
Directory Structure
===================

Warning: this may be a bit stale...

::

    .
    ├── cmdclient           Basic CLI python Matrix client
    ├── demo                Scripts for running standalone Matrix demos
    ├── docs                All doc, including the draft Matrix API spec
    │   ├── client-server       The client-server Matrix API spec
    │   ├── model               Domain-specific elements of the Matrix API spec
    │   ├── server-server       The server-server model of the Matrix API spec
    │   └── sphinx              The internal API doc of the Synapse homeserver
    ├── experiments         Early experiments of using Synapse's internal APIs
    ├── graph               Visualisation of Matrix's distributed message store
    ├── synapse             The reference Matrix homeserver implementation
    │   ├── api                 Common building blocks for the APIs
    │   │   ├── events              Definition of state representation Events
    │   │   └── streams             Definition of streamable Event objects
    │   ├── app                 The __main__ entry point for the homeserver
    │   ├── crypto              The PKI client/server used for secure federation
    │   │   └── resource            PKI helper objects (e.g. keys)
    │   ├── federation          Server-server state replication logic
    │   ├── handlers            The main business logic of the homeserver
    │   ├── http                Wrappers around Twisted's HTTP server & client
    │   ├── rest                Servlet-style RESTful API
    │   ├── storage             Persistence subsystem (currently only sqlite3)
    │   │   └── schema              sqlite persistence schema
    │   └── util                Synapse-specific utilities
    ├── tests               Unit tests for the Synapse homeserver
    └── webclient           Basic AngularJS Matrix web client

632  README.rst
@@ -1,486 +1,290 @@
=========================================================================
Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================

.. contents::

Introduction
============

Matrix is an ambitious new ecosystem for open federated Instant Messaging and
VoIP. The basics you need to know to get up and running are:

- Everything in Matrix happens in a room. Rooms are distributed and do not
  exist on any single server. Rooms can be located using convenience aliases
  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a third party identifier
  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
- Chatrooms are distributed and do not exist on any single server. Rooms
  can be found using names like ``#matrix:matrix.org`` or
  ``#test:localhost:8008`` or they can be ephemeral.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a 3PID: email
  address, phone number, etc rather than manipulating Matrix user IDs)

The overall architecture is::

    client <----> homeserver <=====================> homeserver <----> client
           https://somewhere.org/_matrix      https://elsewhere.net/_matrix
           https://matrix.org/_matrix         https://mydomain.net/_matrix

``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.libera.chat/matrix.
Quick Start
===========

Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!
To get up and running:

- To simply play with an **existing** homeserver you can
  just go straight to http://matrix.org/alpha.

- To run your own **private** homeserver on localhost:8008, install synapse
  with ``python setup.py develop --user`` and then run one with
  ``python synapse/app/homeserver.py`` - you will find a webclient running
  at http://localhost:8008 (use a recent Chrome, Safari or Firefox for now,
  please...)

- To make the homeserver **public** and let it exchange messages with
  other homeservers and participate in the overall Matrix federation, open
  up port 8448 and run ``python synapse/app/homeserver.py --host
  machine.my.domain.name``. Then come join ``#matrix:matrix.org`` and
  say hi! :)

For more detailed setup instructions, please see further down this document.


About Matrix
============

Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
which handle:

- Creating and managing fully distributed chat rooms with no
  single points of control or failure
- Eventually-consistent cryptographically secure synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
  Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls
- Creating and managing fully distributed chat rooms with no
  single points of control or failure
- Eventually-consistent cryptographically secure[1] synchronisation of room
  state across a global open network of federated servers and services
- Sending and receiving extensible messages in a room with (optional)
  end-to-end encryption[2]
- Inviting, joining, leaving, kicking, banning room members
- Managing user accounts (registration, login, logout)
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
  Facebook accounts to authenticate, identify and discover users on Matrix.
- Placing 1:1 VoIP and Video calls

These APIs are intended to be implemented on a wide range of servers, services
and clients, letting developers build messaging and VoIP functionality on top
of the entirely open Matrix ecosystem rather than using closed or proprietary
and clients, letting developers build messaging and VoIP functionality on top of
the entirely open Matrix ecosystem rather than using closed or proprietary
solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
Synapse is a reference "homeserver" implementation of Matrix from the core
development team at matrix.org, written in Python/Twisted for clarity and
simplicity. It is intended to showcase the concept of Matrix and let folks see
the spec in the context of a codebase and let you run your own homeserver and
generally help bootstrap the ecosystem.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.
a Matrix homeserver which stores all their personal chat history and user
account information - much as a mail client connects through to an IMAP/SMTP
server. Just like email, you can either run your own Matrix homeserver and
control and own your own communications and history or use one hosted by someone
else (e.g. matrix.org) - there is no single point of control or mandatory
service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.

We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
web client demo implemented in AngularJS) and cmdclient (a basic Python
commandline utility which lets you easily see what the JSON APIs are up to).

Thanks for using Matrix!
We'd like to invite you to take a look at the Matrix spec, try to run a
homeserver, and join the existing Matrix chatrooms already out there, experiment
with the APIs and the demo clients, and let us know your thoughts at
https://github.com/matrix-org/synapse/issues or at matrix@matrix.org.

Support
=======
Thanks for trying Matrix!

For support installing or managing Synapse, please join |room|_ (from a matrix.org
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.
[1] Cryptographic signing of messages isn't turned on yet

Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
with its source available in |docs|_.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs

Synapse Installation
====================

.. _federation:

* For details on how to install synapse, see
  `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
[2] End-to-end encryption is currently in development


Connecting to Synapse from a client
===================================
Homeserver Installation
=======================

The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
First, the dependencies need to be installed. Start by installing
'python2.7-dev' and the various tools of the compiler toolchain.
N.B. synapse requires python 2.x where x >= 7

Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
Installing prerequisites on ubuntu::

An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
    $ sudo apt-get install build-essential python2.7-dev libffi-dev

If all goes well you should at least be able to log in, create a room, and
start sending messages.
Installing prerequisites on Mac OS X::

.. _`client-user-reg`:
    $ xcode-select --install

Registering a new user from a client
------------------------------------
The homeserver has a number of external dependencies that are easiest
to install by making setup.py do so, in --user mode::

By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
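
As a minimal sketch of the relevant ``homeserver.yaml`` fragment (the option
name is taken from the paragraph above; all surrounding settings are omitted)::

    enable_registration: true
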
    $ python setup.py develop --user

You'll need a version of setuptools new enough to know about git, so you
may need to also run:

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via a Matrix client.
    $ sudo apt-get install python-pip
    $ sudo pip install --upgrade setuptools

If you don't have access to github, then you may need to install ``syutil``
manually by checking it out and running ``python setup.py develop --user`` on it
too.

If you get errors about ``sodium.h`` being missing, you may also need to
manually install a newer PyNaCl via pip as setuptools installs an old one. Or
you can check PyNaCl out of git directly (https://github.com/pyca/pynacl) and
install it. Installing PyNaCl using pip may also work (remember to remove any
other versions installed by setuptools in, for example, ~/.local/lib).

Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
the form of::
This will run a process of downloading and installing into your
user's .local/lib directory all of the required dependencies that are
missing.

    @localpart:my.domain.name
Once this is done, you may wish to run the homeserver's unit tests, to
check that everything is installed as it should be::

(pronounced "at localpart on my dot domain dot name").
    $ python setup.py test

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
This should end with a 'PASSED' result::

Security note
=============
    Ran 143 tests in 0.601s

Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.

.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid

Whilst we make a reasonable effort to mitigate against XSS attacks (for
instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
domain hosting other web applications. This especially applies to sharing
the domain with Matrix web clients and other sensitive applications like
webmail. See
https://developer.github.com/changes/2014-04-25-user-content-security for more
information.

.. _CSP: https://github.com/matrix-org/synapse/pull/1021

Ideally, the homeserver should not simply be on a different subdomain, but on
a completely different `registered domain`_ (also known as top-level site or
eTLD+1). This is because `some attacks`_ are still possible as long as the two
applications share the same registered domain.

.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3

.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie

To illustrate this with an example, if your Element Web or other sensitive web
application is hosted on ``A.example1.com``, you should ideally host Synapse on
``example2.com``. Some amount of protection is offered by hosting on
``B.example1.com`` instead, so this is also acceptable in some scenarios.
However, you should *not* host your Synapse on ``A.example1.com``.

Note that all of the above refers exclusively to the domain used in Synapse's
``public_baseurl`` setting. In particular, it has no bearing on the domain
mentioned in MXIDs hosted on that server.

Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
    PASSED (successes=143)


Upgrading an existing Synapse
=============================
Upgrading an existing homeserver
================================

The instructions for upgrading synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
Before upgrading an existing homeserver to a new version, please refer to
UPGRADE.rst for any additional instructions.


.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
Setting up Federation
=====================

.. _reverse-proxy:
In order for other homeservers to send messages to your server, it will need to
be publicly visible on the internet, and they will need to know its host name.
You have two choices here, which will influence the form of your Matrix user
IDs:

Using a reverse proxy with Synapse
==================================
1) Use the machine's own hostname as available on public DNS in the form of its
   A or AAAA records. This is easier to set up initially, perhaps for testing,
   but lacks the flexibility of SRV.

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
2) Set up a SRV record for your domain name. This requires you create a SRV
   record in DNS, but gives the flexibility to run the server on your own
   choice of TCP port, on a machine that might not be the same name as the
   domain name.

For the first form, simply pass the required hostname (of the machine) as the
--host parameter::

    $ python synapse/app/homeserver.py \
        --server-name machine.my.domain.name \
        --config-path homeserver.config \
        --generate-config
    $ python synapse/app/homeserver.py --config-path homeserver.config

For the second form, first create your SRV record and publish it in DNS. This
needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::

    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.

At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::

    $ python synapse/app/homeserver.py \
        --server-name YOURDOMAIN \
        --bind-port 8448 \
        --config-path homeserver.config \
        --generate-config
    $ python synapse/app/homeserver.py --config-path homeserver.config


You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.

For the initial alpha release, the homeserver is not speaking TLS for
either client-server or server-server traffic for ease of debugging. We have
also not spent any time yet getting the homeserver to run behind loadbalancers.

Running a Demo Federation of Homeservers
----------------------------------------

If you want to get up and running quickly with a trio of homeservers in a
private federation (``localhost:8080``, ``localhost:8081`` and
``localhost:8082``) which you can then access through the webclient running at
http://localhost:8080, simply run::

    $ demo/start.sh

Running The Demo Web Client
===========================

The homeserver runs a web client by default at http://localhost:8080.

If this is the first time you have used the client from that browser (it uses
HTML5 local storage to remember its config), you will need to log in to your
account. If you don't yet have an account, because you've just started the
homeserver for the first time, then you'll need to register one.


Registering A New Account
-------------------------

Your new user name will be formed partly from the hostname your server is
running as, and partly from a localpart you specify when you create the
account. Your name will take the form of::

    @localpart:my.domain.here
(pronounced "at localpart on my dot domain dot here")

Specify your desired localpart in the topmost box of the "Register for an
account" form, and click the "Register" button. Hostnames can contain ports if
required due to lack of SRV records (e.g. @matthew:localhost:8080 on an internal
synapse sandbox running on localhost)


Logging In To An Existing Account
---------------------------------

Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.

For information on configuring one, see `<docs/reverse_proxy.md>`_.

Identity Servers
================

Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.
The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
given Matrix user is very security-sensitive, as there is obvious risk of spam
if it is too easy to sign up for Matrix accounts or harvest 3PID data. Meanwhile
the job of publishing the end-to-end encryption public keys for Matrix users is
also very security-sensitive for similar reasons.

**They are not where accounts or credentials are stored - these live on home
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**
Therefore the role of managing trusted identity in the Matrix ecosystem is
farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
Identity Servers' such as ``sydent``, whose role is purely to authenticate and
track 3PID logins and publish end-user public keys.

This process is very security-sensitive, as there is obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
the role of managing trusted identity in the Matrix ecosystem is farmed out to
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
is purely to authenticate and track 3PID logins and publish end-user public
keys.
It's currently early days for identity servers as Matrix is not yet using 3PIDs
as the primary means of identity and E2E encryption is not complete. As such,
we're not yet running an identity server in public.

You can host your own copy of Sydent, but this will prevent you reaching other
users in the Matrix ecosystem via their email address, and prevent them finding
you. We therefore recommend that you use one of the centralised identity servers
at ``https://matrix.org`` or ``https://vector.im`` for now.

To reiterate: the Identity server will only be used if you choose to associate
an email address with your account, or send an invite to another user via their
email address.
Where's the spec?!
==================

For now, please go spelunking in the ``docs/`` directory to find out.

Password reset
==============

Users can reset their password through their client. Alternatively, a server admin
can reset a user's password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
or by directly editing the database as shown below.
Building Internal API Documentation
===================================

First calculate the hash of the new password::
Before building internal API documentation install sphinx and
sphinxcontrib-napoleon::

    $ ~/synapse/env/bin/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    $ pip install sphinx
    $ pip install sphinxcontrib-napoleon

Then update the ``users`` table in the database::
Building internal API documentation::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
    $ python setup.py build_sphinx


Synapse Development
===================

The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for synapse developers as well as synapse administrators.

Developers might be particularly interested in:

* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
* `notes on Synapse's implementation details <https://matrix-org.github.io/synapse/latest/development/internal_documentation/index.html>`_, and
* `how we use git <https://matrix-org.github.io/synapse/latest/development/git.html>`_.

Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!


Quick start
-----------

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies. We maintain a fixed development
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::

    pip install --user pipx
    pipx install poetry

as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::

    poetry install --extras "all test"

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

    poetry run ./demo/start.sh

(to stop, you can use ``poetry run ./demo/stop.sh``)

See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.

If you just want to start a single instance of the app and run it directly::

    # Create the homeserver.yaml config once
    poetry run synapse_homeserver \
      --server-name my.domain.name \
      --config-path homeserver.yaml \
      --generate-config \
      --report-stats=[yes|no]

    # Start the app
    poetry run synapse_homeserver --config-path homeserver.yaml


Running the unit tests
----------------------

After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::

    poetry run trial tests

This should end with a 'PASSED' result (note that exact numbers will
differ)::

    Ran 1337 tests in 716.064s

    PASSED (skips=15, successes=1322)

For more tips on running the unit tests, like running a specific test or
to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.


Running the Integration Tests
-----------------------------

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.


Platform dependencies
=====================

Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the
`<docs/deprecation_policy.md>`_ document for more details.


Troubleshooting
===============

Need help? Join our community support room on Matrix:
`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_

Running out of File Handles
---------------------------

If synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the
connecting client). Matrix currently can legitimately use a lot of file handles,
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
servers. The first time a server talks in a room it will try to connect
simultaneously to all participating servers, which could exhaust the available
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
to respond. (We need to improve the routing algorithm used to be better than
full mesh, but as of March 2019 this hasn't happened yet).

If you hit this failure mode, we recommend increasing the maximum number of
open file handles to be at least 4096 (assuming a default of 1024 or 256).
This is typically done by editing ``/etc/security/limits.conf``.
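
For example (a sketch only - the user name and exact limits here are
assumptions, so adjust them to match your deployment), you might add lines
like these to ``/etc/security/limits.conf``::

    # user running synapse is assumed to be 'matrix-synapse'
    matrix-synapse  soft  nofile  4096
    matrix-synapse  hard  nofile  4096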

Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at #synapse:matrix.org if
you see this failure mode so we can help debug it.

Help!! Synapse is slow and eats all my RAM/CPU!
-----------------------------------------------

First, ensure you are running the latest version of Synapse, using Python 3
with a PostgreSQL database.

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in the future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
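
As a concrete sketch (assuming a Debian-style install that reads environment
variables from ``/etc/default/matrix-synapse``, as in the libjemalloc example
below), raising the cache factor looks like::

    # in /etc/default/matrix-synapse (location is an assumption)
    SYNAPSE_CACHE_FACTOR=2.0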

Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall memory use, and especially in terms of giving back
RAM to the OS. To use it, the library must simply be put in the
LD_PRELOAD environment variable when launching Synapse. On Debian, this
can be done by installing the ``libjemalloc1`` package and adding this
line to ``/etc/default/matrix-synapse``::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

If you're encountering high CPU use by the Synapse process itself, you
may be affected by a bug with presence tracking that leads to a
massive excess of outgoing federation requests (see `discussion
<https://github.com/matrix-org/synapse/issues/3971>`_). If metrics
indicate that your server is also issuing far more outgoing federation
requests than can be accounted for by your users' activity, this is a
likely cause. The misbehavior can be worked around by setting
the following in the Synapse config file:

.. code-block:: yaml

    presence:
      enabled: false

People can't accept room invitations from me
--------------------------------------------

The typical failure mode here is that you send an invitation to someone
to join a room or direct chat, but when they go to accept it, they get an
error (typically along the lines of "Invalid signature"). They might see
something like the following in their logs::

    2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>

This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.

.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
  :alt: (get support on #synapse:matrix.org)
  :target: https://matrix.to/#/#synapse:matrix.org

.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
  :alt: (discuss development on #synapse-dev:matrix.org)
  :target: https://matrix.to/#/#synapse-dev:matrix.org

.. |documentation| image:: https://img.shields.io/badge/documentation-%E2%9C%93-success
  :alt: (Rendered documentation on GitHub Pages)
  :target: https://matrix-org.github.io/synapse/latest/

.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
  :alt: (check license in LICENSE file)
  :target: LICENSE

.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse
  :alt: (latest version released on PyPi)
  :target: https://pypi.org/project/matrix-synapse

.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse
  :alt: (supported python versions)
  :target: https://pypi.org/project/matrix-synapse

53  UPGRADE.rst
@@ -1,7 +1,50 @@
Upgrading Synapse
=================
Upgrading to v0.2.0
===================

This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
Please update your links.
The home server now requires setting up of SSL config before it can run. To
automatically generate default config use::

The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
    $ python synapse/app/homeserver.py \
        --server-name machine.my.domain.name \
        --bind-port 8448 \
        --config-path homeserver.config \
        --generate-config

This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::

    $ python synapse/app/homeserver.py --config-path homeserver.config

See the README.rst for more information.

Also note that some config options have been renamed (see the sketch below
the list), including:

- "host" to "server-name"
- "database" to "database-path"
- "port" to "bind-port" and "unsecure-port"


Upgrading to v0.0.1
===================

This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.

The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.

Before running the command the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.::

    ./database-prepare-for-0.0.1.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.

On startup of the new version, users can rejoin remote rooms either by using
room aliases or by being reinvited. Alternatively, if any other homeserver
sends a message to a room that the homeserver was previously in, the local HS
will automatically rejoin the room.

7  WISHLIST.rst  (new file)
@@ -0,0 +1,7 @@
Broad-sweeping stuff which would be nice to have
================================================

- Additional SQL backends beyond sqlite
- homeserver implementation in go
- homeserver implementation in node.js
- client SDKs

39  book.toml
@@ -1,39 +0,0 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false

# The directory that documentation files are stored in
src = "docs"

[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false

[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"

# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true

# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"

# The path that the docs are hosted on
site-url = "/synapse/"

# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"

1  changelog.d/.gitignore  (vendored)
@@ -1 +0,0 @@
!.gitignore

@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,11 @@
# limitations under the License.

""" Starts a synapse client console. """
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient

import argparse
import binascii
import cmd
import getpass
import json
@@ -24,23 +27,21 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
from typing import Optional

import urlparse
from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json

from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding

from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException

CONFIG_JSON = "cmdclient_config.json"

# TODO: The concept of trusted identity servers has been deprecated. This option and checks
# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]

TRUSTED_ID_SERVERS = [
    'localhost:8001'
]

class SynapseCmd(cmd.Cmd):

    """Basic synapse command-line processor.

    This processes commands from the user and calls the relevant HTTP methods.
@@ -57,7 +58,7 @@ class SynapseCmd(cmd.Cmd):
            "token": token,
            "verbose": "on",
            "complete_usernames": "on",
            "send_delivery_receipts": "on",
            "send_delivery_receipts": "on"
        }
        self.path_prefix = "/_matrix/client/api/v1"
        self.event_stream_token = "END"
@@ -92,7 +93,7 @@ class SynapseCmd(cmd.Cmd):
        return self.config["user"].split(":")[1]

    def do_config(self, line):
        """Show the config for this client: "config"
        """ Show the config for this client: "config"
        Edit a key value mapping: "config key value" e.g. "config token 1234"
        Config variables:
        user: The username to auth with.
@@ -108,7 +109,7 @@ class SynapseCmd(cmd.Cmd):
        by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
        """
        if len(line) == 0:
            print(json.dumps(self.config, indent=4))
            print json.dumps(self.config, indent=4)
            return

        try:
@@ -118,11 +119,12 @@ class SynapseCmd(cmd.Cmd):
            config_rules = [  # key, valid_values
                ("verbose", ["on", "off"]),
                ("complete_usernames", ["on", "off"]),
                ("send_delivery_receipts", ["on", "off"]),
                ("send_delivery_receipts", ["on", "off"])
            ]
            for key, valid_vals in config_rules:
                if key == args["key"] and args["val"] not in valid_vals:
                    print("%s value must be one of %s" % (args["key"], valid_vals))
                    print "%s value must be one of %s" % (args["key"],
                        valid_vals)
                    return

            # toggle the http client verbosity
@@ -131,11 +133,11 @@ class SynapseCmd(cmd.Cmd):

            # assign the new config
            self.config[args["key"]] = args["val"]
            print(json.dumps(self.config, indent=4))
            print json.dumps(self.config, indent=4)

            save_config(self.config)
        except Exception as e:
            print(e)
            print e

    def do_register(self, line):
        """Registers for a new account: "register <userid> <noupdate>"
@@ -143,50 +145,36 @@ class SynapseCmd(cmd.Cmd):
        <noupdate> : Do not automatically clobber config values.
        """
        args = self._parse(line, ["userid", "noupdate"])
        path = "/register"

        password = None
        pwd = None
        pwd2 = "_"
        while pwd != pwd2:
            pwd = getpass.getpass("Type a password for this user: ")
            pwd = getpass.getpass("(Optional) Type a password for this user: ")
            if len(pwd) == 0:
                print "Not using a password for this user."
                break
            pwd2 = getpass.getpass("Retype the password: ")
            if pwd != pwd2 or len(pwd) == 0:
                print("Password mismatch.")
                pwd = None
            if pwd != pwd2:
                print "Password mismatch."
            else:
                password = pwd

        body = {"type": "m.login.password"}
        body = {}
        if "userid" in args:
            body["user"] = args["userid"]
            body["user_id"] = args["userid"]
        if password:
            body["password"] = password

        reactor.callFromThread(self._do_register, body, "noupdate" not in args)
        reactor.callFromThread(self._do_register, "POST", path, body,
                               "noupdate" not in args)

    @defer.inlineCallbacks
    def _do_register(self, data, update_config):
        # check the registration flows
        url = self._url() + "/register"
        json_res = yield self.http_client.do_request("GET", url)
        print(json.dumps(json_res, indent=4))

        passwordFlow = None
        for flow in json_res["flows"]:
            if flow["type"] == "m.login.recaptcha" or (
                "stages" in flow and "m.login.recaptcha" in flow["stages"]
            ):
                print("Unable to register: Home server requires captcha.")
                return
            if flow["type"] == "m.login.password" and "stages" not in flow:
                passwordFlow = flow
                break

        if not passwordFlow:
            return

        json_res = yield self.http_client.do_request("POST", url, data=data)
        print(json.dumps(json_res, indent=4))
    def _do_register(self, method, path, data, update_config):
        url = self._url() + path
        json_res = yield self.http_client.do_request(method, url, data=data)
        print json.dumps(json_res, indent=4)
        if update_config and "user_id" in json_res:
            self.config["user"] = json_res["user_id"]
            self.config["token"] = json_res["access_token"]
@@ -198,7 +186,9 @@ class SynapseCmd(cmd.Cmd):
        """
        try:
            args = self._parse(line, ["user_id"], force_keys=True)
            can_login = threads.blockingCallFromThread(reactor, self._check_can_login)
            can_login = threads.blockingCallFromThread(
                reactor,
                self._check_can_login)
            if can_login:
                p = getpass.getpass("Enter your password: ")
                user = args["user_id"]
@@ -206,25 +196,29 @@ class SynapseCmd(cmd.Cmd):
                domain = self._domain()
                if domain:
                    user = "@" + user + ":" + domain


                reactor.callFromThread(self._do_login, user, p)
                # print " got %s " % p
                #print " got %s " % p
        except Exception as e:
            print(e)
            print e

    @defer.inlineCallbacks
    def _do_login(self, user, password):
        path = "/login"
        data = {"user": user, "password": password, "type": "m.login.password"}
        data = {
            "user": user,
            "password": password,
            "type": "m.login.password"
        }
        url = self._url() + path
        json_res = yield self.http_client.do_request("POST", url, data=data)
        print(json_res)
        print json_res

        if "access_token" in json_res:
            self.config["user"] = user
            self.config["token"] = json_res["access_token"]
            save_config(self.config)
            print("Login successful.")
            print "Login successful."

    @defer.inlineCallbacks
    def _check_can_login(self):
@@ -233,19 +227,18 @@ class SynapseCmd(cmd.Cmd):
        # submitting!
        url = self._url() + path
        json_res = yield self.http_client.do_request("GET", url)
        print(json_res)
        print json_res

        if "flows" not in json_res:
            print("Failed to find any login flows.")
            print "Failed to find any login flows."
            defer.returnValue(False)

        flow = json_res["flows"][0]  # assume first is the one we want.
        if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
        flow = json_res["flows"][0] # assume first is the one we want.
        if ("type" not in flow or "m.login.password" != flow["type"] or
                "stages" in flow):
            fallback_url = self._url() + "/login/fallback"
            print(
                "Unable to login via the command line client. Please visit "
                "%s to login." % fallback_url
            )
            print ("Unable to login via the command line client. Please visit "
                   "%s to login." % fallback_url)
            defer.returnValue(False)
        defer.returnValue(True)

@@ -255,34 +248,21 @@ class SynapseCmd(cmd.Cmd):
        <clientSecret> A string of characters generated when requesting an email that you'll supply in subsequent calls to identify yourself
        <sendAttempt> The number of times the user has requested an email. Leave this the same between requests to retry the request at the transport level. Increment it to request that the email be sent again.
        """
        args = self._parse(line, ["address", "clientSecret", "sendAttempt"])
        args = self._parse(line, ['address', 'clientSecret', 'sendAttempt'])

        postArgs = {
            "email": args["address"],
            "clientSecret": args["clientSecret"],
            "sendAttempt": args["sendAttempt"],
        }
        postArgs = {'email': args['address'], 'clientSecret': args['clientSecret'], 'sendAttempt': args['sendAttempt']}

        reactor.callFromThread(self._do_emailrequest, postArgs)

    @defer.inlineCallbacks
    def _do_emailrequest(self, args):
        # TODO: Update to use v2 Identity Service API endpoint
|
||||
url = (
|
||||
self._identityServerUrl()
|
||||
+ "/_matrix/identity/api/v1/validate/email/requestToken"
|
||||
)
|
||||
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/requestToken"
|
||||
|
||||
json_res = yield self.http_client.do_request(
|
||||
"POST",
|
||||
url,
|
||||
data=urllib.urlencode(args),
|
||||
jsonreq=False,
|
||||
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
|
||||
)
|
||||
print(json_res)
|
||||
if "sid" in json_res:
|
||||
print("Token sent. Your session ID is %s" % (json_res["sid"]))
|
||||
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
|
||||
headers={'Content-Type': ['application/x-www-form-urlencoded']})
|
||||
print json_res
|
||||
if 'sid' in json_res:
|
||||
print "Token sent. Your session ID is %s" % (json_res['sid'])
|
||||
|
||||
def do_emailvalidate(self, line):
|
||||
"""Validate and associate a third party ID
|
||||
@@ -290,58 +270,39 @@ class SynapseCmd(cmd.Cmd):
|
||||
<token> The token sent to your third party identifier address
|
||||
<clientSecret> The same clientSecret you supplied in requestToken
|
||||
"""
|
||||
args = self._parse(line, ["sid", "token", "clientSecret"])
|
||||
args = self._parse(line, ['sid', 'token', 'clientSecret'])
|
||||
|
||||
postArgs = {
|
||||
"sid": args["sid"],
|
||||
"token": args["token"],
|
||||
"clientSecret": args["clientSecret"],
|
||||
}
|
||||
postArgs = { 'sid' : args['sid'], 'token' : args['token'], 'clientSecret': args['clientSecret'] }
|
||||
|
||||
reactor.callFromThread(self._do_emailvalidate, postArgs)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _do_emailvalidate(self, args):
|
||||
# TODO: Update to use v2 Identity Service API endpoint
|
||||
url = (
|
||||
self._identityServerUrl()
|
||||
+ "/_matrix/identity/api/v1/validate/email/submitToken"
|
||||
)
|
||||
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/submitToken"
|
||||
|
||||
json_res = yield self.http_client.do_request(
|
||||
"POST",
|
||||
url,
|
||||
data=urllib.urlencode(args),
|
||||
jsonreq=False,
|
||||
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
|
||||
)
|
||||
print(json_res)
|
||||
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
|
||||
headers={'Content-Type': ['application/x-www-form-urlencoded']})
|
||||
print json_res
|
||||
|
||||
def do_3pidbind(self, line):
|
||||
"""Validate and associate a third party ID
|
||||
<sid> The session ID (sid) given to you in the response to requestToken
|
||||
<clientSecret> The same clientSecret you supplied in requestToken
|
||||
"""
|
||||
args = self._parse(line, ["sid", "clientSecret"])
|
||||
args = self._parse(line, ['sid', 'clientSecret'])
|
||||
|
||||
postArgs = {"sid": args["sid"], "clientSecret": args["clientSecret"]}
|
||||
postArgs["mxid"] = self.config["user"]
|
||||
postArgs = { 'sid' : args['sid'], 'clientSecret': args['clientSecret'] }
|
||||
postArgs['mxid'] = self.config["user"]
|
||||
|
||||
reactor.callFromThread(self._do_3pidbind, postArgs)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _do_3pidbind(self, args):
|
||||
# TODO: Update to use v2 Identity Service API endpoint
|
||||
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
|
||||
url = self._identityServerUrl()+"/_matrix/identity/api/v1/3pid/bind"
|
||||
|
||||
json_res = yield self.http_client.do_request(
|
||||
"POST",
|
||||
url,
|
||||
data=urllib.urlencode(args),
|
||||
jsonreq=False,
|
||||
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
|
||||
)
|
||||
print(json_res)
|
||||
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
|
||||
headers={'Content-Type': ['application/x-www-form-urlencoded']})
|
||||
print json_res
|
||||
|
||||
def do_join(self, line):
|
||||
"""Joins a room: "join <roomid>" """
|
||||
@@ -349,7 +310,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
args = self._parse(line, ["roomid"], force_keys=True)
|
||||
self._do_membership_change(args["roomid"], "join", self._usr())
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print e
|
||||
|
||||
def do_joinalias(self, line):
|
||||
try:
|
||||
@@ -357,34 +318,36 @@ class SynapseCmd(cmd.Cmd):
|
||||
path = "/join/%s" % urllib.quote(args["roomname"])
|
||||
reactor.callFromThread(self._run_and_pprint, "POST", path, {})
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print e
|
||||
|
||||
def do_topic(self, line):
|
||||
""" "topic [set|get] <roomid> [<newtopic>]"
|
||||
""""topic [set|get] <roomid> [<newtopic>]"
|
||||
Set the topic for a room: topic set <roomid> <newtopic>
|
||||
Get the topic for a room: topic get <roomid>
|
||||
"""
|
||||
try:
|
||||
args = self._parse(line, ["action", "roomid", "topic"])
|
||||
if "action" not in args or "roomid" not in args:
|
||||
print("Must specify set|get and a room ID.")
|
||||
print "Must specify set|get and a room ID."
|
||||
return
|
||||
if args["action"].lower() not in ["set", "get"]:
|
||||
print("Must specify set|get, not %s" % args["action"])
|
||||
print "Must specify set|get, not %s" % args["action"]
|
||||
return
|
||||
|
||||
path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
|
||||
|
||||
if args["action"].lower() == "set":
|
||||
if "topic" not in args:
|
||||
print("Must specify a new topic.")
|
||||
print "Must specify a new topic."
|
||||
return
|
||||
body = {"topic": args["topic"]}
|
||||
body = {
|
||||
"topic": args["topic"]
|
||||
}
|
||||
reactor.callFromThread(self._run_and_pprint, "PUT", path, body)
|
||||
elif args["action"].lower() == "get":
|
||||
reactor.callFromThread(self._run_and_pprint, "GET", path)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print e
|
||||
|
||||
def do_invite(self, line):
|
||||
"""Invite a user to a room: "invite <userid> <roomid>" """
|
||||
@@ -395,66 +358,49 @@ class SynapseCmd(cmd.Cmd):
|
||||
|
||||
reactor.callFromThread(self._do_invite, args["roomid"], user_id)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print e
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _do_invite(self, roomid, userstring):
|
||||
if not userstring.startswith("@") and self._is_on("complete_usernames"):
|
||||
# TODO: Update to use v2 Identity Service API endpoint
|
||||
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
|
||||
if (not userstring.startswith('@') and
|
||||
self._is_on("complete_usernames")):
|
||||
url = self._identityServerUrl()+"/_matrix/identity/api/v1/lookup"
|
||||
|
||||
json_res = yield self.http_client.do_request(
|
||||
"GET", url, qparams={"medium": "email", "address": userstring}
|
||||
)
|
||||
json_res = yield self.http_client.do_request("GET", url, qparams={'medium':'email','address':userstring})
|
||||
|
||||
mxid = None
|
||||
|
||||
if "mxid" in json_res and "signatures" in json_res:
|
||||
# TODO: Update to use v2 Identity Service API endpoint
|
||||
url = (
|
||||
self._identityServerUrl()
|
||||
+ "/_matrix/identity/api/v1/pubkey/ed25519"
|
||||
)
|
||||
if 'mxid' in json_res and 'signatures' in json_res:
|
||||
url = self._identityServerUrl()+"/_matrix/identity/api/v1/pubkey/ed25519"
|
||||
|
||||
pubKey = None
|
||||
pubKeyObj = yield self.http_client.do_request("GET", url)
|
||||
if "public_key" in pubKeyObj:
|
||||
pubKey = decode_verify_key_bytes(
|
||||
NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
|
||||
)
|
||||
if 'public_key' in pubKeyObj:
|
||||
pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
|
||||
else:
|
||||
print("No public key found in pubkey response!")
|
||||
print "No public key found in pubkey response!"
|
||||
|
||||
sigValid = False
|
||||
|
||||
if pubKey:
|
||||
for signame in json_res["signatures"]:
|
||||
for signame in json_res['signatures']:
|
||||
if signame not in TRUSTED_ID_SERVERS:
|
||||
print(
|
||||
"Ignoring signature from untrusted server %s"
|
||||
% (signame)
|
||||
)
|
||||
print "Ignoring signature from untrusted server %s" % (signame)
|
||||
else:
|
||||
try:
|
||||
verify_signed_json(json_res, signame, pubKey)
|
||||
sigValid = True
|
||||
print(
|
||||
"Mapping %s -> %s correctly signed by %s"
|
||||
% (userstring, json_res["mxid"], signame)
|
||||
)
|
||||
print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
|
||||
break
|
||||
except SignatureVerifyException as e:
|
||||
print("Invalid signature from %s" % (signame))
|
||||
print(e)
|
||||
print "Invalid signature from %s" % (signame)
|
||||
print e
|
||||
|
||||
if sigValid:
|
||||
print("Resolved 3pid %s to %s" % (userstring, json_res["mxid"]))
|
||||
mxid = json_res["mxid"]
|
||||
print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
|
||||
mxid = json_res['mxid']
|
||||
else:
|
||||
print(
|
||||
"Got association for %s but couldn't verify signature"
|
||||
% (userstring)
|
||||
)
|
||||
print "Got association for %s but couldn't verify signature" % (userstring)
|
||||
|
||||
if not mxid:
|
||||
mxid = "@" + userstring + ":" + self._domain()
|
||||
@@ -467,17 +413,18 @@ class SynapseCmd(cmd.Cmd):
|
||||
args = self._parse(line, ["roomid"], force_keys=True)
|
||||
self._do_membership_change(args["roomid"], "leave", self._usr())
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print e
|
||||
|
||||
def do_send(self, line):
|
||||
"""Sends a message. "send <roomid> <body>" """
|
||||
args = self._parse(line, ["roomid", "body"])
|
||||
txn_id = "txn%s" % int(time.time())
|
||||
path = "/rooms/%s/send/m.room.message/%s" % (
|
||||
urllib.quote(args["roomid"]),
|
||||
txn_id,
|
||||
)
|
||||
body_json = {"msgtype": "m.text", "body": args["body"]}
|
||||
path = "/rooms/%s/send/m.room.message/%s" % (urllib.quote(args["roomid"]),
|
||||
txn_id)
|
||||
body_json = {
|
||||
"msgtype": "m.text",
|
||||
"body": args["body"]
|
||||
}
|
||||
reactor.callFromThread(self._run_and_pprint, "PUT", path, body_json)
|
||||
|
||||
def do_list(self, line):
|
||||
@@ -490,11 +437,11 @@ class SynapseCmd(cmd.Cmd):
|
||||
"list messages <roomid> from=END&to=START&limit=3"
|
||||
"""
|
||||
args = self._parse(line, ["type", "roomid", "qp"])
|
||||
if "type" not in args or "roomid" not in args:
|
||||
print("Must specify type and room ID.")
|
||||
if not "type" in args or not "roomid" in args:
|
||||
print "Must specify type and room ID."
|
||||
return
|
||||
if args["type"] not in ["members", "messages"]:
|
||||
print("Unrecognised type: %s" % args["type"])
|
||||
print "Unrecognised type: %s" % args["type"]
|
||||
return
|
||||
room_id = args["roomid"]
|
||||
path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
|
||||
@@ -505,11 +452,12 @@ class SynapseCmd(cmd.Cmd):
|
||||
try:
|
||||
key_value = key_value_str.split("=")
|
||||
qp[key_value[0]] = key_value[1]
|
||||
except Exception:
|
||||
print("Bad query param: %s" % key_value)
|
||||
except:
|
||||
print "Bad query param: %s" % key_value
|
||||
return
|
||||
|
||||
reactor.callFromThread(self._run_and_pprint, "GET", path, query_params=qp)
|
||||
reactor.callFromThread(self._run_and_pprint, "GET", path,
|
||||
query_params=qp)
|
||||
|
||||
def do_create(self, line):
|
||||
"""Creates a room.
|
||||
@@ -545,22 +493,14 @@ class SynapseCmd(cmd.Cmd):
|
||||
args = self._parse(line, ["method", "path", "data"])
|
||||
# sanity check
|
||||
if "method" not in args or "path" not in args:
|
||||
print("Must specify path and method.")
|
||||
print "Must specify path and method."
|
||||
return
|
||||
|
||||
args["method"] = args["method"].upper()
|
||||
valid_methods = [
|
||||
"PUT",
|
||||
"GET",
|
||||
"POST",
|
||||
"DELETE",
|
||||
"XPUT",
|
||||
"XGET",
|
||||
"XPOST",
|
||||
"XDELETE",
|
||||
]
|
||||
valid_methods = ["PUT", "GET", "POST", "DELETE",
|
||||
"XPUT", "XGET", "XPOST", "XDELETE"]
|
||||
if args["method"] not in valid_methods:
|
||||
print("Unsupported method: %s" % args["method"])
|
||||
print "Unsupported method: %s" % args["method"]
|
||||
return
|
||||
|
||||
if "data" not in args:
|
||||
@@ -569,7 +509,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
try:
|
||||
args["data"] = json.loads(args["data"])
|
||||
except Exception as e:
|
||||
print("Data is not valid JSON. %s" % e)
|
||||
print "Data is not valid JSON. %s" % e
|
||||
return
|
||||
|
||||
qp = {"access_token": self._tok()}
|
||||
@@ -582,16 +522,13 @@ class SynapseCmd(cmd.Cmd):
|
||||
parsed_url = urlparse.urlparse(args["path"])
|
||||
qp.update(urlparse.parse_qs(parsed_url.query))
|
||||
args["path"] = parsed_url.path
|
||||
except Exception:
|
||||
except:
|
||||
pass
|
||||
|
||||
reactor.callFromThread(
|
||||
self._run_and_pprint,
|
||||
args["method"],
|
||||
args["path"],
|
||||
args["data"],
|
||||
query_params=qp,
|
||||
)
|
||||
reactor.callFromThread(self._run_and_pprint, args["method"],
|
||||
args["path"],
|
||||
args["data"],
|
||||
query_params=qp)
|
||||
|
||||
def do_stream(self, line):
|
||||
"""Stream data from the server: "stream <longpoll timeout ms>" """
|
||||
@@ -601,31 +538,26 @@ class SynapseCmd(cmd.Cmd):
|
||||
try:
|
||||
timeout = int(args["timeout"])
|
||||
except ValueError:
|
||||
print("Timeout must be in milliseconds.")
|
||||
print "Timeout must be in milliseconds."
|
||||
return
|
||||
reactor.callFromThread(self._do_event_stream, timeout)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _do_event_stream(self, timeout):
|
||||
res = yield defer.ensureDeferred(
|
||||
self.http_client.get_json(
|
||||
res = yield self.http_client.get_json(
|
||||
self._url() + "/events",
|
||||
{
|
||||
"access_token": self._tok(),
|
||||
"timeout": str(timeout),
|
||||
"from": self.event_stream_token,
|
||||
},
|
||||
)
|
||||
)
|
||||
print(json.dumps(res, indent=4))
|
||||
"from": self.event_stream_token
|
||||
})
|
||||
print json.dumps(res, indent=4)
|
||||
|
||||
if "chunk" in res:
|
||||
for event in res["chunk"]:
|
||||
if (
|
||||
event["type"] == "m.room.message"
|
||||
and self._is_on("send_delivery_receipts")
|
||||
and event["user_id"] != self._usr()
|
||||
): # not sent by us
|
||||
if (event["type"] == "m.room.message" and
|
||||
self._is_on("send_delivery_receipts") and
|
||||
event["user_id"] != self._usr()): # not sent by us
|
||||
self._send_receipt(event, "d")
|
||||
|
||||
# update the position in the stram
|
||||
@@ -633,28 +565,18 @@ class SynapseCmd(cmd.Cmd):
|
||||
self.event_stream_token = res["end"]
|
||||
|
||||
def _send_receipt(self, event, feedback_type):
|
||||
path = "/rooms/%s/messages/%s/%s/feedback/%s/%s" % (
|
||||
urllib.quote(event["room_id"]),
|
||||
event["user_id"],
|
||||
event["msg_id"],
|
||||
self._usr(),
|
||||
feedback_type,
|
||||
)
|
||||
path = ("/rooms/%s/messages/%s/%s/feedback/%s/%s" %
|
||||
(urllib.quote(event["room_id"]), event["user_id"], event["msg_id"],
|
||||
self._usr(), feedback_type))
|
||||
data = {}
|
||||
reactor.callFromThread(
|
||||
self._run_and_pprint,
|
||||
"PUT",
|
||||
path,
|
||||
data=data,
|
||||
alt_text="Sent receipt for %s" % event["msg_id"],
|
||||
)
|
||||
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data,
|
||||
alt_text="Sent receipt for %s" % event["msg_id"])
|
||||
|
||||
def _do_membership_change(self, roomid, membership, userid):
|
||||
path = "/rooms/%s/state/m.room.member/%s" % (
|
||||
urllib.quote(roomid),
|
||||
urllib.quote(userid),
|
||||
)
|
||||
data = {"membership": membership}
|
||||
path = "/rooms/%s/state/m.room.member/%s" % (urllib.quote(roomid), urllib.quote(userid))
|
||||
data = {
|
||||
"membership": membership
|
||||
}
|
||||
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
|
||||
|
||||
def do_displayname(self, line):
|
||||
@@ -690,7 +612,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
self._do_presence_state(2, line)
|
||||
|
||||
def _parse(self, line, keys, force_keys=False):
|
||||
"""Parses the given line.
|
||||
""" Parses the given line.
|
||||
|
||||
Args:
|
||||
line : The line to parse
|
||||
@@ -707,21 +629,16 @@ class SynapseCmd(cmd.Cmd):
|
||||
for i, arg in enumerate(line_args):
|
||||
for config_key in self.config:
|
||||
if ("$" + config_key) in arg:
|
||||
arg = arg.replace("$" + config_key, self.config[config_key])
|
||||
arg = arg.replace("$" + config_key,
|
||||
self.config[config_key])
|
||||
line_args[i] = arg
|
||||
|
||||
return dict(zip(keys, line_args))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _run_and_pprint(
|
||||
self,
|
||||
method,
|
||||
path,
|
||||
data=None,
|
||||
query_params: Optional[dict] = None,
|
||||
alt_text=None,
|
||||
):
|
||||
"""Runs an HTTP request and pretty prints the output.
|
||||
def _run_and_pprint(self, method, path, data=None,
|
||||
query_params={"access_token": None}, alt_text=None):
|
||||
""" Runs an HTTP request and pretty prints the output.
|
||||
|
||||
Args:
|
||||
method: HTTP method
|
||||
@@ -729,37 +646,35 @@ class SynapseCmd(cmd.Cmd):
|
||||
data: Raw JSON data if any
|
||||
query_params: dict of query parameters to add to the url
|
||||
"""
|
||||
query_params = query_params or {"access_token": None}
|
||||
|
||||
url = self._url() + path
|
||||
if "access_token" in query_params:
|
||||
query_params["access_token"] = self._tok()
|
||||
|
||||
json_res = yield self.http_client.do_request(
|
||||
method, url, data=data, qparams=query_params
|
||||
)
|
||||
json_res = yield self.http_client.do_request(method, url,
|
||||
data=data,
|
||||
qparams=query_params)
|
||||
if alt_text:
|
||||
print(alt_text)
|
||||
print alt_text
|
||||
else:
|
||||
print(json.dumps(json_res, indent=4))
|
||||
print json.dumps(json_res, indent=4)
|
||||
|
||||
|
||||
def save_config(config):
|
||||
with open(CONFIG_JSON, "w") as out:
|
||||
with open(CONFIG_JSON, 'w') as out:
|
||||
json.dump(config, out)
|
||||
|
||||
|
||||
def main(server_url, identity_server_url, username, token, config_path):
|
||||
print("Synapse command line client")
|
||||
print("===========================")
|
||||
print("Server: %s" % server_url)
|
||||
print("Type 'help' to get started.")
|
||||
print("Close this console with CTRL+C then CTRL+D.")
|
||||
print "Synapse command line client"
|
||||
print "==========================="
|
||||
print "Server: %s" % server_url
|
||||
print "Type 'help' to get started."
|
||||
print "Close this console with CTRL+C then CTRL+D."
|
||||
if not username or not token:
|
||||
print("- 'register <username>' - Register an account")
|
||||
print("- 'stream' - Connect to the event stream")
|
||||
print("- 'create <roomid>' - Create a room")
|
||||
print("- 'send <roomid> <message>' - Send a message")
|
||||
print "- 'register <username>' - Register an account"
|
||||
print "- 'stream' - Connect to the event stream"
|
||||
print "- 'create <roomid>' - Create a room"
|
||||
print "- 'send <roomid> <message>' - Send a message"
|
||||
http_client = TwistedHttpClient()
|
||||
|
||||
# the command line client
|
||||
@@ -769,14 +684,14 @@ def main(server_url, identity_server_url, username, token, config_path):
|
||||
global CONFIG_JSON
|
||||
CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
|
||||
try:
|
||||
with open(config_path, "r") as config:
|
||||
with open(config_path, 'r') as config:
|
||||
syn_cmd.config = json.load(config)
|
||||
try:
|
||||
http_client.verbose = "on" == syn_cmd.config["verbose"]
|
||||
except Exception:
|
||||
except:
|
||||
pass
|
||||
print("Loaded config from %s" % config_path)
|
||||
except Exception:
|
||||
print "Loaded config from %s" % config_path
|
||||
except:
|
||||
pass
|
||||
|
||||
# Twisted-specific: Runs the command processor in Twisted's event loop
|
||||
@@ -786,37 +701,27 @@ def main(server_url, identity_server_url, username, token, config_path):
|
||||
reactor.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser("Starts a synapse client.")
|
||||
parser.add_argument(
|
||||
"-s",
|
||||
"--server",
|
||||
dest="server",
|
||||
default="http://localhost:8008",
|
||||
help="The URL of the home server to talk to.",
|
||||
)
|
||||
"-s", "--server", dest="server", default="http://localhost:8008",
|
||||
help="The URL of the home server to talk to.")
|
||||
parser.add_argument(
|
||||
"-i",
|
||||
"--identity-server",
|
||||
dest="identityserver",
|
||||
default="http://localhost:8090",
|
||||
help="The URL of the identity server to talk to.",
|
||||
)
|
||||
"-i", "--identity-server", dest="identityserver", default="http://localhost:8090",
|
||||
help="The URL of the identity server to talk to.")
|
||||
parser.add_argument(
|
||||
"-u", "--username", dest="username", help="Your username on the server."
|
||||
)
|
||||
parser.add_argument("-t", "--token", dest="token", help="Your access token.")
|
||||
"-u", "--username", dest="username",
|
||||
help="Your username on the server.")
|
||||
parser.add_argument(
|
||||
"-c",
|
||||
"--config",
|
||||
dest="config",
|
||||
default=CONFIG_JSON,
|
||||
help="The location of the config.json file to read from.",
|
||||
)
|
||||
"-t", "--token", dest="token",
|
||||
help="Your access token.")
|
||||
parser.add_argument(
|
||||
"-c", "--config", dest="config", default=CONFIG_JSON,
|
||||
help="The location of the config.json file to read from.")
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.server:
|
||||
print("You must supply a server URL to communicate with.")
|
||||
print "You must supply a server URL to communicate with."
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
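Both sides of the hunks above share the console client's threading model: the interactive `cmd` loop runs in an ordinary thread, HTTP work is handed to the Twisted reactor with `reactor.callFromThread`, and synchronous answers are fetched back with `threads.blockingCallFromThread`. A minimal, self-contained sketch of that handoff pattern (the `fetch_greeting` function and its contents are illustrative stand-ins, not code from this diff):

```python
# Sketch of the reactor/worker-thread handoff used by the console client.
from twisted.internet import defer, reactor, task, threads


@defer.inlineCallbacks
def fetch_greeting(name):
    # Runs on the reactor thread; stands in for an asynchronous HTTP call.
    yield task.deferLater(reactor, 0.1, lambda: None)
    defer.returnValue("hello %s" % name)


def command_loop():
    # Runs in a worker thread, like the cmd.Cmd loop above. This call blocks
    # the worker thread until the Deferred fires on the reactor thread.
    result = threads.blockingCallFromThread(reactor, fetch_greeting, "world")
    print(result)
    # Fire-and-forget calls go the other way with callFromThread.
    reactor.callFromThread(reactor.stop)


reactor.callInThread(command_loop)
reactor.run()
```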
@@ -1,4 +1,5 @@
-# Copyright 2014-2016 OpenMarket Ltd
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,21 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import json
-import urllib
-from pprint import pformat
-from typing import Optional
-
-from twisted.internet import defer, reactor
 from twisted.web.client import Agent, readBody
 from twisted.web.http_headers import Headers
+from twisted.internet import defer, reactor
+
+from pprint import pformat
+
+import json
+import urllib


-class HttpClient:
-    """Interface for talking json over http"""
+class HttpClient(object):
+    """ Interface for talking json over http
+    """

     def put_json(self, url, data):
-        """Sends the specifed json data using PUT
+        """ Sends the specifed json data using PUT

         Args:
             url (str): The URL to PUT data to.
@@ -34,13 +36,15 @@ class HttpClient:
             the request body. This will be encoded as JSON.

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass

     def get_json(self, url, args=None):
-        """Gets some json from the given host homeserver and path
+        """ Get's some json from the given host homeserver and path

         Args:
             url (str): The URL to GET data from.
@@ -50,14 +54,16 @@ class HttpClient:
             and *not* a string.

         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
+            Deferred: Succeeds when we get *any* HTTP response.
+
+            The result of the deferred is a tuple of `(code, response)`,
+            where `response` is a dict representing the decoded JSON body.
         """
         pass


 class TwistedHttpClient(HttpClient):
-    """Wrapper around the twisted HTTP client api.
+    """ Wrapper around the twisted HTTP client api.

     Attributes:
         agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -70,7 +76,9 @@ class TwistedHttpClient(HttpClient):
     @defer.inlineCallbacks
     def put_json(self, url, data):
         response = yield self._create_put_request(
-            url, data, headers_dict={"Content-Type": ["application/json"]}
+            url,
+            data,
+            headers_dict={"Content-Type": ["application/json"]}
         )
         body = yield readBody(response)
         defer.returnValue((response.code, body))
@@ -85,46 +93,45 @@ class TwistedHttpClient(HttpClient):
         body = yield readBody(response)
         defer.returnValue(json.loads(body))

-    def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
-        """Wrapper of _create_request to issue a PUT request"""
-        headers_dict = headers_dict or {}
-
+    def _create_put_request(self, url, json_data, headers_dict={}):
+        """ Wrapper of _create_request to issue a PUT request
+        """
         if "Content-Type" not in headers_dict:
-            raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
+            raise defer.error(
+                RuntimeError("Must include Content-Type header for PUTs"))

         return self._create_request(
-            "PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
+            "PUT",
+            url,
+            producer=_JsonProducer(json_data),
+            headers_dict=headers_dict
         )

-    def _create_get_request(self, url, headers_dict: Optional[dict] = None):
-        """Wrapper of _create_request to issue a GET request"""
-        return self._create_request("GET", url, headers_dict=headers_dict or {})
+    def _create_get_request(self, url, headers_dict={}):
+        """ Wrapper of _create_request to issue a GET request
+        """
+        return self._create_request(
+            "GET",
+            url,
+            headers_dict=headers_dict
+        )

     @defer.inlineCallbacks
-    def do_request(
-        self,
-        method,
-        url,
-        data=None,
-        qparams=None,
-        jsonreq=True,
-        headers: Optional[dict] = None,
-    ):
-        headers = headers or {}
-
+    def do_request(self, method, url, data=None, qparams=None, jsonreq=True, headers={}):
         if qparams:
             url = "%s?%s" % (url, urllib.urlencode(qparams, True))

         if jsonreq:
             prod = _JsonProducer(data)
-            headers["Content-Type"] = ["application/json"]
+            headers['Content-Type'] = ["application/json"];
         else:
             prod = _RawProducer(data)

         if method in ["POST", "PUT"]:
-            response = yield self._create_request(
-                method, url, producer=prod, headers_dict=headers
-            )
+            response = yield self._create_request(method, url,
+                                                  producer=prod,
+                                                  headers_dict=headers)
         else:
             response = yield self._create_request(method, url)

@@ -132,33 +139,33 @@ class TwistedHttpClient(HttpClient):
         defer.returnValue(json.loads(body))

     @defer.inlineCallbacks
-    def _create_request(
-        self, method, url, producer=None, headers_dict: Optional[dict] = None
-    ):
-        """Creates and sends a request to the given url"""
-        headers_dict = headers_dict or {}
-
+    def _create_request(self, method, url, producer=None, headers_dict={}):
+        """ Creates and sends a request to the given url
+        """
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]

         retries_left = 5
-        print("%s to %s with headers %s" % (method, url, headers_dict))
+        print "%s to %s with headers %s" % (method, url, headers_dict)
         if self.verbose and producer:
             if "password" in producer.data:
                 temp = producer.data["password"]
                 producer.data["password"] = "[REDACTED]"
-                print(json.dumps(producer.data, indent=4))
+                print json.dumps(producer.data, indent=4)
                 producer.data["password"] = temp
             else:
-                print(json.dumps(producer.data, indent=4))
+                print json.dumps(producer.data, indent=4)

         while True:
             try:
                 response = yield self.agent.request(
-                    method, url.encode("UTF8"), Headers(headers_dict), producer
+                    method,
+                    url.encode("UTF8"),
+                    Headers(headers_dict),
+                    producer
                 )
                 break
             except Exception as e:
-                print("uh oh: %s" % e)
+                print "uh oh: %s" % e
                 if retries_left:
                     yield self.sleep(2 ** (5 - retries_left))
                     retries_left -= 1
@@ -166,8 +173,8 @@ class TwistedHttpClient(HttpClient):
                     raise e

         if self.verbose:
-            print("Status %s %s" % (response.code, response.phrase))
-            print(pformat(list(response.headers.getAllRawHeaders())))
+            print "Status %s %s" % (response.code, response.phrase)
+            print pformat(list(response.headers.getAllRawHeaders()))
         defer.returnValue(response)

     def sleep(self, seconds):
@@ -175,8 +182,7 @@ class TwistedHttpClient(HttpClient):
         reactor.callLater(seconds, d.callback, seconds)
         return d


-class _RawProducer:
+class _RawProducer(object):
     def __init__(self, data):
         self.data = data
         self.body = data
@@ -192,10 +198,9 @@ class _RawProducer:
     def stopProducing(self):
         pass


-class _JsonProducer:
-    """Used by the twisted http client to create the HTTP body from json"""
-
+class _JsonProducer(object):
+    """ Used by the twisted http client to create the HTTP body from json
+    """
     def __init__(self, jsn):
         self.data = jsn
         self.body = json.dumps(jsn).encode("utf8")
@@ -209,4 +214,4 @@ class _JsonProducer:
         pass

     def stopProducing(self):
-        pass
+        pass
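One recurring change in this file is worth calling out: the v1.63.1 side replaces mutable default arguments such as `headers_dict={}` with `headers_dict: Optional[dict] = None` plus `headers_dict = headers_dict or {}`. A default dict is created once, at function definition time, and shared across every call, so a mutation like the `User-Agent` assignment in `_create_request` would leak into later calls. A small illustration of the pitfall (the function names here are hypothetical, not from this file):

```python
# Why `def f(headers={})` is dangerous: the same dict object is reused
# across calls, so mutations accumulate between otherwise unrelated calls.
def request_shared(headers={}):
    headers["User-Agent"] = ["Synapse Cmd Client"]
    return headers


def request_fresh(headers=None):
    headers = headers or {}  # the pattern used on the v1.63.1 side
    headers["User-Agent"] = ["Synapse Cmd Client"]
    return headers


a = request_shared()
b = request_shared()
assert a is b      # both calls mutated one shared dict

c = request_fresh()
d = request_fresh()
assert c is not d  # each call gets its own dict
```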
@@ -1,10 +0,0 @@
Community Contributions
=======================

Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.
@@ -1,32 +0,0 @@

# Synapse Docker

### Configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment the sections that are not suitable for your use case.

To use manual configuration, specify a ``SYNAPSE_CONFIG_PATH``, preferably
pointing at a persistent path.

To generate a fresh `homeserver.yaml`, you can use the `generate` command.
(See the [documentation](../../docker/README.md#generating-a-configuration-file)
for more information.) You will need to specify appropriate values for at least the
`SYNAPSE_SERVER_NAME` and `SYNAPSE_REPORT_STATS` environment variables. For example:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host -e SYNAPSE_REPORT_STATS=yes synapse generate
```

(This will also generate necessary signing keys.)

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### More information

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
@@ -1,66 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    build:
      context: ../..
      dockerfile: docker/Dockerfile
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    # NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
    environment:
      - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      # The following lines are valid for Traefik version 1.x:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.Host
      - traefik.port=8008
      # Alternatively, for Traefik version 2.0:
      - traefik.enable=true
      - traefik.http.routers.http-synapse.entryPoints=http
      - traefik.http.routers.http-synapse.rule=Host(`my.matrix.host`)
      - traefik.http.middlewares.https_redirect.redirectscheme.scheme=https
      - traefik.http.middlewares.https_redirect.redirectscheme.permanent=true
      - traefik.http.routers.http-synapse.middlewares=https_redirect
      - traefik.http.routers.https-synapse.entryPoints=https
      - traefik.http.routers.https-synapse.rule=Host(`my.matrix.host`)
      - traefik.http.routers.https-synapse.service=synapse
      - traefik.http.routers.https-synapse.tls=true
      - traefik.http.services.synapse.loadbalancer.server.port=8008
      - traefik.http.routers.https-synapse.tls.certResolver=le-ssl

  db:
    image: docker.io/postgres:12-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
      # ensure the database gets created correctly
      # https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data
@@ -1,125 +0,0 @@
# Setting up Synapse with Workers using Docker Compose

This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).

Example worker configuration files can be found [here](workers).

All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.

An example Docker Compose file can be found [here](docker-compose.yaml).

## Worker Service Examples in Docker Compose

In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or, alternatively, pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.

### Generic Worker Example

```yaml
synapse-generic-worker-1:
  image: matrixdotorg/synapse:latest
  container_name: synapse-generic-worker-1
  restart: unless-stopped
  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
  healthcheck:
    test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
    start_period: "5s"
    interval: "15s"
    timeout: "5s"
  volumes:
    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
  environment:
    SYNAPSE_WORKER: synapse.app.generic_worker
  # Expose port if required so your reverse proxy can send requests to this worker
  # Port configuration will depend on how the http listener is defined in the worker configuration file
  ports:
    - 8081:8081
  depends_on:
    - synapse
```

### Federation Sender Example

Please note: The federation sender does not receive REST API calls, so no exposed ports are required.

```yaml
synapse-federation-sender-1:
  image: matrixdotorg/synapse:latest
  container_name: synapse-federation-sender-1
  restart: unless-stopped
  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
  healthcheck:
    disable: true
  volumes:
    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
  environment:
    SYNAPSE_WORKER: synapse.app.federation_sender
  depends_on:
    - synapse
```

## `homeserver.yaml` Configuration

### Enable Redis

Locate the `redis` section of your `homeserver.yaml` and enable and configure it:

```yaml
redis:
  enabled: true
  host: redis
  port: 6379
  # password: <secret_password>
```

This assumes that your Redis service is called `redis` in your Docker Compose file.

### Add a replication Listener

Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:

```yaml
listeners:
  # Other listeners

  - port: 9093
    type: http
    resources:
      - names: [replication]
```

This listener is used by the workers for replication and is referred to in worker config files using the following settings:

```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```

### Add Workers to `instance_map`

Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:

```yaml
instance_map:
  synapse-generic-worker-1:        # The worker_name setting in your worker configuration file
    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
    port: 8034                     # The port assigned to the replication listener in your worker config file
  synapse-federation-sender-1:
    host: synapse-federation-sender-1
    port: 8034
```

### Configure Federation Senders

This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:

```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false

federation_sender_instances:
  - synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```

## Other Worker types

Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
@@ -1,77 +0,0 @@
networks:
  backend:

services:
  postgres:
    image: postgres:latest
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
    networks:
      - backend
    environment:
      POSTGRES_DB: synapse
      POSTGRES_USER: synapse_user
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C

  redis:
    image: redis:latest
    restart: unless-stopped
    networks:
      - backend

  synapse:
    image: matrixdotorg/synapse:latest
    container_name: synapse
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data:/data:rw
    ports:
      - 8008:8008
    networks:
      - backend
    environment:
      SYNAPSE_CONFIG_DIR: /data
      SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
    depends_on:
      - postgres

  synapse-generic-worker-1:
    image: matrixdotorg/synapse:latest
    container_name: synapse-generic-worker-1
    restart: unless-stopped
    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
    healthcheck:
      test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
      start_period: "5s"
      interval: "15s"
      timeout: "5s"
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
    environment:
      SYNAPSE_WORKER: synapse.app.generic_worker
    # Expose port if required so your reverse proxy can send requests to this worker
    # Port configuration will depend on how the http listener is defined in the worker configuration file
    ports:
      - 8081:8081
    depends_on:
      - synapse

  synapse-federation-sender-1:
    image: matrixdotorg/synapse:latest
    container_name: synapse-federation-sender-1
    restart: unless-stopped
    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
    healthcheck:
      disable: true
    networks:
      - backend
    volumes:
      - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
    environment:
      SYNAPSE_WORKER: synapse.app.federation_sender
    depends_on:
      - synapse
@@ -1,14 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1

# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]

worker_log_config: /data/federation_sender.log.config
@@ -1,19 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1

# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]
  - type: http
    port: 8081
    x_forwarded: true
    resources:
      - names: [client, federation]

worker_log_config: /data/worker.log.config
@@ -1,51 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    formatter: fmt
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]
    encoding: utf8

root:
  level: INFO
  handlers: [console] # to use file handler instead, switch to [file]

loggers:
  synapse:
    level: INFO

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO

  # example of enabling debugging for a component:
  #
  # synapse.federation.transport.server:
  #   level: DEBUG
@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. [contrib/prometheus](../prometheus)
File diff suppressed because it is too large
@@ -1,157 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import datetime
import html
import json
import sqlite3

import pydot

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze


def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
    """
    Generate a dot and SVG file for a graph of events in the room based on the
    topological ordering by reading from a Synapse SQLite database.
    """
    conn = sqlite3.connect(db_name)

    sql = "SELECT room_version FROM rooms WHERE room_id = ?"
    c = conn.execute(sql, (room_id,))
    room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]

    sql = (
        "SELECT json, internal_metadata FROM event_json as j "
        "INNER JOIN events as e ON e.event_id = j.event_id "
        "WHERE j.room_id = ?"
    )

    args = [room_id]

    if limit:
        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"

        args.append(limit)

    c = conn.execute(sql, args)

    events = [
        make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
        for e in c.fetchall()
    ]

    events.sort(key=lambda e: e.depth)

    node_map = {}
    state_groups = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        c = conn.execute(
            "SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
            (event.event_id,),
        )

        res = c.fetchone()
        state_group = res[0] if res else None

        if state_group is not None:
            state_groups.setdefault(state_group, []).append(event.event_id)

        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime("%Y-%m-%d %H:%M:%S,%f")

        content = json.dumps(unfreeze(event.get_dict()["content"]))

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            "State group: %(state_group)s<br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": html.escape(content, quote=True),
            "time": t,
            "depth": event.depth,
            "state_group": state_group,
        }

        node = pydot.Node(name=event.event_id, label=label)

        node_map[event.event_id] = node
        graph.add_node(node)

    for event in events:
        for prev_id in event.prev_event_ids():
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    for group, event_ids in state_groups.items():
        if len(event_ids) <= 1:
            continue

        cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")

        for event_id in event_ids:
            cluster.add_node(node_map[event_id])

        graph.add_subgraph(cluster)

    graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
    graph.write_svg("%s.svg" % file_prefix, prog="dot")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by talking "
        "to the given Synapse SQLite file to get the list of PDUs. \n"
        "Requires pydot."
    )
    parser.add_argument(
        "-p",
        "--prefix",
        dest="prefix",
        help="String to prefix output files with",
        default="graph_output",
    )
    parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
    parser.add_argument("db")
    parser.add_argument("room")

    args = parser.parse_args()

    make_graph(args.db, args.room, args.prefix, args.limit)
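For reference, the deleted script could be driven from the CLI shown above or by calling `make_graph` directly; a hypothetical invocation (the database path and room ID below are placeholders, not values from the diff) would look like:

```python
# Hypothetical call, mirroring what the argparse block above does:
make_graph(
    "homeserver.db",         # path to the Synapse SQLite database
    "!abc123:example.com",   # placeholder room ID
    "graph_output",          # writes graph_output.dot and graph_output.svg
    10,                      # keep only the 10 most recent events
)
```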
@@ -1,155 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import datetime
import html
import json

import pydot

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.util.frozenutils import unfreeze


def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
    """
    Generate a dot and SVG file for a graph of events in the room based on the
    topological ordering by reading line-delimited JSON from a file.
    """
    print("Reading lines")
    with open(file_name) as f:
        lines = f.readlines()

    print("Read lines")

    # Figure out the room version, assume the first line is the create event.
    room_version = KNOWN_ROOM_VERSIONS[
        json.loads(lines[0]).get("content", {}).get("room_version")
    ]

    events = [make_event_from_dict(json.loads(line), room_version) for line in lines]

    print("Loaded events.")

    events.sort(key=lambda e: e.depth)

    print("Sorted events")

    if limit:
        events = events[-int(limit) :]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime("%Y-%m-%d %H:%M:%S,%f")

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print(content)
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, str):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s,"
                % (
                    html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
                    html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
                )
            )

        content = "<br/>\n".join(content)

        print(content)

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(name=event.event_id, label=label)

        node_map[event.event_id] = node
        graph.add_node(node)

    print("Created Nodes")

    for event in events:
        for prev_id in event.prev_event_ids():
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print("Created edges")

    graph.write("%s.dot" % file_prefix, format="raw", prog="dot")

    print("Created Dot")

    graph.write_svg("%s.svg" % file_prefix, prog="dot")

    print("Created svg")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
        "from a file with line delimited events. \n"
        "Requires pydot."
    )
    parser.add_argument(
        "-p",
        "--prefix",
        dest="prefix",
        help="String to prefix output files with",
        default="graph_output",
    )
    parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
    parser.add_argument("event_file")

    args = parser.parse_args()

    make_graph(args.event_file, args.prefix, args.limit)
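The file-based variant above can be run directly on a dump of line-delimited event JSON. A minimal sketch, assuming the script is saved as `graph3.py` and `events.json` holds one event per line with the `m.room.create` event first (which the room-version lookup expects):

```bash
# Plot the last 500 events from a line-delimited JSON dump.
python graph3.py events.json -p graph_output -l 500
```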
@@ -1,48 +0,0 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

    http://prometheus.io/

### for Prometheus v1

Add a new job to the main prometheus.conf file:

```yaml
  job: {
    name: "synapse"

    target_group: {
      target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
    }
  }
```

### for Prometheus v2

Add a new job to the main prometheus.yml file:

```yaml
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    # when endpoint uses https:
    scheme: "https"

    static_configs:
      - targets: ["my.server.here:port"]
```

An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).

To use `synapse.rules` add

```yaml
  rule_files:
    - "/PATH/TO/synapse-v2.rules"
```

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.
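Before waiting on Prometheus to scrape, it can be worth confirming that the metrics endpoint actually responds. A quick check, assuming metrics are enabled and the host/port placeholders from the scrape config above:

```bash
# Print the first few exported metrics; host and port are placeholders.
curl -s "http://my.server.here:port/_synapse/metrics" | head -n 20
```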
@@ -1,378 +0,0 @@
{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]-[[index]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resident_memory_bytes"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resident_memory_bytes"),
  expr: "process_resident_memory_bytes",
  name: "[[job]]-[[index]]",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds",
  name: "[[job]]-[[index]]",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
  name: "[[job]]-[[index]]",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
  name: "[[job]]-[[index]]",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
  name: "[[job]]-[[index]] [[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_sec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_sec"),
  expr: "rate(synapse_storage_transaction_time_sum[2m])",
  name: "[[job]]-[[index]] [[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average time waiting for database connection</h3>
<div id="synapse_storage_avg_waiting_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_avg_waiting_time"),
  expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
  name: "[[job]]-[[index]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Cache request rate</h3>
<div id="synapse_cache_request_rate"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_request_rate"),
  expr: "rate(synapse_util_caches_cache:total[2m])",
  name: "[[job]]-[[index]] [[name]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "rps",
  yTitle: "Cache request rate"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[job]]-[[index]] [[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet"),
  expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
  name: "[[job]]-[[index]] [[method]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
  expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[job]]-[[index]] [[method]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
  name: "[[job]]-[[index]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
  name: "[[job]]-[[index]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
  name: "[[job]]-[[index]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
  name: "[[job]]-[[index]] [[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[job]]-[[index]] [[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[job]]-[[index]] [[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "[[job]]-[[index]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "[[job]]-[[index]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
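To actually serve this template, it has to live in the directory Prometheus loads console templates from. A sketch, assuming a packaged Prometheus whose `--web.console.templates` flag points at `/etc/prometheus/consoles` (the path is an assumption; check your installation):

```bash
# Copy the template into Prometheus's console directory, then browse to
# http://your-prometheus:9090/consoles/synapse.html
cp synapse.html /etc/prometheus/consoles/
```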
@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,78 +0,0 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'

  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
    labels:
      type: remote
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
    labels:
      type: local
  - record: synapse_storage_events_persisted_by_source_type
    expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
    labels:
      type: bridges
  - record: synapse_storage_events_persisted_by_event_type
    expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
  - record: synapse_storage_events_persisted_by_origin
    expr: sum without(type) (synapse_storage_events_persisted_events_sep)
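Before pointing a live server at these v2 rules, they can be validated offline with `promtool`, which ships with the standard Prometheus distribution:

```bash
# Syntax-check the recording rules without touching the running server.
promtool check rules /PATH/TO/synapse-v2.rules
```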
@@ -1,18 +0,0 @@
Purge history API examples
==========================

# `purge_history.sh`

A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.

Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
the script.

# `purge_remote_media.sh`

A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all old cached remote media.
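A typical run, as a sketch: edit the variables named above directly in the script, then execute it on the homeserver host, since both the database and the client API need to be reachable from where it runs:

```bash
# DOMAIN, ADMIN, ROOMS_ARRAY and TIME are edited in the script itself.
chmod +x purge_history.sh
./purge_history.sh
```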
@@ -1,143 +0,0 @@
#!/usr/bin/env bash

# this script will use the api:
#    https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
#
# It will purge all messages in a list of rooms up to a certain event

###################################################################################################
# define your domain and admin user
###################################################################################################
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@your_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

###################################################################################################
# choose the rooms to prune old messages from (add a free comment at the end)
###################################################################################################
# the room_id's you can get e.g. from your Riot clients "View Source" button on each message
ROOMS_ARRAY=(
'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
)

# ALTERNATIVELY:
# you can select all the rooms that are not encrypted and loop over the result:
# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
# or
# select all rooms with at least 100 members:
# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
# GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc

###################################################################################################
# evaluate the EVENT_ID before which should be pruned
###################################################################################################
# choose a time before which the messages should be pruned:
TIME='12 months ago'
# ALTERNATIVELY:
# a certain time:
# TIME='2016-08-31 23:59:59'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")

# ALTERNATIVELY:
# prune all messages that are older than 1000 messages ago:
# LAST_MESSAGES=1000
# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"

# ALTERNATIVELY:
# select the EVENT_ID manually:
#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew

###################################################################################################
# make the admin user a server admin in the database with
###################################################################################################
# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###################################################################################################
# get an access token
###################################################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
AUTH="Authorization: Bearer $TOKEN"

###################################################################################################
# check, if your TOKEN works. For example this works:
###################################################################################################
# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###################################################################################################
# finally start pruning the room:
###################################################################################################
# this will really delete local events, so the messages in the room really
# disappear unless they are restored by remote federation. This is because
# we pass {"delete_local_events":true} to the curl invocation below.

for ROOM in "${ROOMS_ARRAY[@]}"; do
    echo "########################################### $(date) ################# "
    echo "pruning room: $ROOM ..."
    ROOM=${ROOM%#*}
    #set -x
    echo "check for alias in db..."
    # for postgres:
    sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
    echo "get event..."
    # for postgres:
    EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
    if [ "$EVENT_ID" == "" ]; then
        echo "no event $TIME"
    else
        echo "event: $EVENT_ID"
        SLEEP=2
        set -x
        # call purge
        OUT=$(curl --header "$AUTH" -s -d '{"delete_local_events":true}' -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
        PURGE_ID=$(echo "$OUT" | grep purge_id | cut -d'"' -f4 )
        if [ "$PURGE_ID" == "" ]; then
            # probably the history purge is already in progress for $ROOM
            : "continuing with next room"
        else
            while : ; do
                # get status of purge and sleep longer each time if still active
                sleep $SLEEP
                STATUS=$(curl --header "$AUTH" -s -X GET "$API_URL/admin/purge_history_status/$PURGE_ID" | grep status | cut -d'"' -f4)
                : "$ROOM --> Status: $STATUS"
                [[ "$STATUS" == "active" ]] || break
                SLEEP=$((SLEEP + 1))
            done
        fi
        set +x
        sleep 1
    fi
done


###################################################################################################
# additionally
###################################################################################################
# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
# This can take a very long time (hours) and the client has to be stopped while you do so:
# $ synctl stop
# $ sqlite3 -line homeserver.db "vacuum;"
# $ synctl start

# This could be set, so you don't need to prune every time after deleting some rows:
# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
# be cautious, it could make the database somewhat slow if there are a lot of deletions

exit
@@ -1,54 +0,0 @@
#!/usr/bin/env bash

DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@your_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

# choose a time before which the messages should be pruned:
# TIME='2016-08-31 23:59:59'
TIME='12 months ago'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")


###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###############################################################################
# make the admin user a server admin in the database with
###############################################################################
# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###############################################################################
# get an access token
###############################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")

###############################################################################
# check, if your TOKEN works. For example this works:
###############################################################################
# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###############################################################################
# optional check size before
###############################################################################
# echo calculate used storage before ...
# du -shc ../.synapse/media_store/*

###############################################################################
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
@@ -1,57 +0,0 @@
name: matrix-synapse
base: core18
version: git
summary: Reference Matrix homeserver
description: |
  Synapse is the reference Matrix homeserver.
  Matrix is a federated and decentralised instant messaging and VoIP system.

grade: stable
confinement: strict

apps:
  matrix-synapse:
    command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
    stop-command: synctl -c $SNAP_COMMON stop
    plugs: [network-bind, network]
    daemon: simple
  hash-password:
    command: hash_password
  generate-config:
    command: generate_config
  generate-signing-key:
    command: generate_signing_key
  register-new-matrix-user:
    command: register_new_matrix_user
    plugs: [network]
  synctl:
    command: synctl
parts:
  matrix-synapse:
    source: .
    plugin: python
    python-version: python3
    python-packages:
      - '.[all]'
      - pip
      - setuptools
      - setuptools-scm
      - wheel
    build-packages:
      - libffi-dev
      - libturbojpeg0-dev
      - libssl-dev
      - libxslt1-dev
      - libpq-dev
      - zlib1g-dev
    stage-packages:
      - libasn1-8-heimdal
      - libgssapi3-heimdal
      - libhcrypto4-heimdal
      - libheimbase1-heimdal
      - libheimntlm0-heimdal
      - libhx509-5-heimdal
      - libkrb5-26-heimdal
      - libldap-2.4-2
      - libpq5
      - libsasl2-2
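Building and installing the snap locally might look like the following sketch; `--dangerous` is needed because a locally built snap is unsigned, and the exact filename depends on the version snapcraft stamps in:

```bash
# Build from the repository root (where snapcraft.yaml lives), then sideload.
snapcraft
sudo snap install ./matrix-synapse_*.snap --dangerous
```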
@@ -1,3 +0,0 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).
@@ -1,18 +0,0 @@
# Setup Synapse with Systemd
This is a setup for managing synapse with a user contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to your installation, in accordance with the
[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).

## Setup
1. Under the service section, ensure the `User` variable matches the user
you installed synapse under and wish to run it as.
2. Under the service section, ensure the `WorkingDirectory` variable matches
where you have installed synapse.
3. Under the service section, ensure the `ExecStart` variable matches the
appropriate locations of your installation.
4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`.
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
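Steps 4-6 condensed into a shell session; the `daemon-reload` is an extra step not listed above, added so systemd picks up the newly copied unit:

```bash
sudo cp matrix-synapse.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start matrix-synapse
sudo systemctl status matrix-synapse
```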
@@ -1,25 +0,0 @@
version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.logging.context.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
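With this config in place, Synapse's log lines land in the journal under the configured identifier rather than in a file, so they can be read back with journalctl:

```bash
# -t matches the SYSLOG_IDENTIFIER set in the journal handler above;
# -f follows new entries as they arrive.
journalctl -t synapse -f
```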
@@ -1,39 +0,0 @@
# Example systemd configuration file for synapse. Copy into
# /etc/systemd/system/, update the paths if necessary, then:
#
#    systemctl enable matrix-synapse
#    systemctl start matrix-synapse
#
# This assumes that Synapse has been installed by a user named
# synapse.
#
# This assumes that Synapse has been installed in a virtualenv in
# the user's home directory: `/home/synapse/synapse/env`.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.

[Unit]
Description=Synapse Matrix homeserver
# If you are using postgresql to persist data, uncomment this line to make sure
# synapse starts after the postgresql service.
# After=postgresql.service

[Service]
Type=notify
NotifyAccess=main
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-abort

User=synapse
Group=nogroup

WorkingDirectory=/home/synapse/synapse
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
SyslogIdentifier=matrix-synapse

# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0

[Install]
WantedBy=multi-user.target
@@ -1,71 +0,0 @@
[Service]
# The following directives give the synapse service R/W access to:
# - /run/matrix-synapse
# - /var/lib/matrix-synapse
# - /var/log/matrix-synapse

RuntimeDirectory=matrix-synapse
StateDirectory=matrix-synapse
LogsDirectory=matrix-synapse

######################
## Security Sandbox ##
######################

# Make sure that the service has its own unshared tmpfs at /tmp and that it
# cannot see or change any real devices
PrivateTmp=true
PrivateDevices=true

# We give no capabilities to a service by default
CapabilityBoundingSet=
AmbientCapabilities=

# Protect the following from modification:
# - The entire filesystem
# - sysctl settings and loaded kernel modules
# - No modifications allowed to Control Groups
# - Hostname
# - System Clock
ProtectSystem=strict
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectClock=true
ProtectHostname=true

# Prevent access to the following:
# - /home directory
# - Kernel logs
ProtectHome=tmpfs
ProtectKernelLogs=true

# Make sure that the process can only see PIDs and process details of itself,
# and the second option disables seeing details of things like system load and
# I/O etc
ProtectProc=invisible
ProcSubset=pid

# While not needed, we set these options explicitly
# - This process has been given access to the host network
# - It can also communicate with any IP Address
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
IPAddressAllow=any

# Restrict system calls to a sane bunch
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources @obsolete

# Misc restrictions
# - Since the process is a python process it needs to be able to write and
#   execute memory regions, so we set MemoryDenyWriteExecute to false
RestrictSUIDSGID=true
RemoveIPC=true
NoNewPrivileges=true
RestrictRealtime=true
RestrictNamespaces=true
LockPersonality=true
PrivateUsers=true
MemoryDenyWriteExecute=false
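One way to apply these hardening directives without editing the packaged unit is a systemd drop-in; `systemctl edit` creates one under `/etc/systemd/system/matrix-synapse.service.d/` and reloads the daemon on save:

```bash
# Opens an editor for an override.conf drop-in; paste the [Service] block above.
sudo systemctl edit matrix-synapse
sudo systemctl restart matrix-synapse
```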
2 contrib/vertobot/.gitignore vendored
@@ -1,2 +0,0 @@
vucbot.yaml
vertobot.yaml
@@ -1,309 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::Matrix 0.11_002;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;

binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";

my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
#   https://rt.cpan.org/Ticket/Display.html?id=93107
ref $loop eq "IO::Async::Loop::Poll" and
    warn "Using SSL with IO::Poll causes known memory-leaks!!\n";

GetOptions(
    'C|config=s' => \my $CONFIG,
    'eval-from=s' => \my $EVAL_FROM,
) or exit 1;

if( defined $EVAL_FROM ) {
    # An emergency 'eval() this file' hack
    $SIG{HUP} = sub {
        my $code = do {
            open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
            local $/; <$fh>
        };

        eval $code or warn "Cannot eval() - $@";
    };
}

defined $CONFIG or die "Must supply --config\n";

my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };

my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;

# Track every Room object, so we can ->leave them all on shutdown
my %bot_matrix_rooms;

my $bridgestate = {};
my $roomid_by_callid = {};

my $bot_verto = Net::Async::WebSocket::Client->new(
    on_frame => sub {
        my ( $self, $frame ) = @_;
        warn "[Verto] receiving $frame";
        on_verto_json($frame);
    },
);
$loop->add( $bot_verto );

my $sessid = lc new Data::UUID->create_str();

my $bot_matrix = Net::Async::Matrix->new(
    %MATRIX_CONFIG,
    on_log => sub { warn "log: @_\n" },
    on_invite => sub {
        my ($matrix, $invite) = @_;
        warn "[Matrix] invited to: " . $invite->{room_id} . " by " . $invite->{inviter} . "\n";

        $matrix->join_room( $invite->{room_id} )->get;
    },
    on_room_new => sub {
        my ($matrix, $room) = @_;

        warn "[Matrix] have a room ID: " . $room->room_id . "\n";

        $bot_matrix_rooms{$room->room_id} = $room;

        # log in to verto on behalf of this room
        $bridgestate->{$room->room_id}->{sessid} = $sessid;

        $room->configure(
            on_message => \&on_room_message,
        );

        my $f = send_verto_json_request("login", {
            'login' => $CONFIG{'verto-dialog-params'}{'login'},
            'passwd' => $CONFIG{'verto-config'}{'passwd'},
            'sessid' => $sessid,
        });
        $matrix->adopt_future($f);

        # we deliberately don't paginate the room, as we only care about
        # new calls
    },
    on_unknown_event => \&on_unknown_event,
    on_error => sub {
        print STDERR "Matrix failure: @_\n";
    },
);
$loop->add( $bot_matrix );

sub on_unknown_event
{
    my ($matrix, $event) = @_;
    print Dumper($event);

    my $room_id = $event->{room_id};
    my %dp = %{$CONFIG{'verto-dialog-params'}};
    $dp{callID} = $bridgestate->{$room_id}->{callid};

    if ($event->{type} eq 'm.call.invite') {
        $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
        $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
        $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
        $bridgestate->{$room_id}->{gathered_candidates} = 0;
        $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
        # no trickle ICE in verto apparently
    }
    elsif ($event->{type} eq 'm.call.candidates') {
        # XXX: compare call IDs
        if (!$bridgestate->{$room_id}->{gathered_candidates}) {
            $bridgestate->{$room_id}->{gathered_candidates} = 1;
            my $offer = $bridgestate->{$room_id}->{offer};
            my $candidate_block = {
                audio => '',
                video => '',
            };
            foreach (@{$event->{content}->{candidates}}) {
                if ($_->{sdpMid}) {
                    $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
                }
                else {
                    $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
                    $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
                }
            }

            # XXX: assumes audio comes first
            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
            #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;

            $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
            $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;

            my $f = send_verto_json_request("verto.invite", {
                "sdp" => $offer,
                "dialogParams" => \%dp,
                "sessid" => $bridgestate->{$room_id}->{sessid},
            });
            $matrix->adopt_future($f);
        }
        else {
            # ignore them, as no trickle ICE, although we might as well
            # batch them up
            # foreach (@{$event->{content}->{candidates}}) {
            #     push @{$bridgestate->{$room_id}->{candidates}}, $_;
            # }
        }
    }
    elsif ($event->{type} eq 'm.call.hangup') {
        if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
            my $f = send_verto_json_request("verto.bye", {
                "dialogParams" => \%dp,
                "sessid" => $bridgestate->{$room_id}->{sessid},
            });
            $matrix->adopt_future($f);
        }
        else {
            warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
        }
    }
    else {
        warn "Unhandled event: $event->{type}";
    }
}

sub on_room_message
{
    my ($room, $from, $content) = @_;
    my $room_id = $room->room_id;
    warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
}

Future->needs_all(
    $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
        $bot_matrix->start;
    }),

    $bot_verto->connect(
        %{ $CONFIG{"verto-bot"} },
        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
    )->on_done( sub {
        warn("[Verto] connected to websocket");
    }),
)->get;

$loop->attach_signal(
    PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
    INT => sub { $loop->stop },
);
$loop->attach_signal(
    TERM => sub { $loop->stop },
);

eval {
    $loop->run;
} or my $e = $@;

# When the bot gets shut down, have it leave the rooms so it's clear to observers
# that it is no longer running.
# if( $CONFIG{"leave-on-shutdown"} // 1 ) {
#     print STDERR "Removing bot from Matrix rooms...\n";
#     Future->wait_all( map { $_->leave->else_done() } values %bot_matrix_rooms )->get;
# }
# else {
#     print STDERR "Leaving bot users in Matrix rooms.\n";
# }

die $e if $e;

exit 0;

{
    my $json_id;
    my $requests;

    sub send_verto_json_request
    {
        $json_id ||= 1;

        my ($method, $params) = @_;
        my $json = {
            jsonrpc => "2.0",
            method => $method,
            params => $params,
            id => $json_id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
        my $request = $loop->new_future;
        $requests->{$json_id} = $request;
        $json_id++;
        return $request;
    }

    sub send_verto_json_response
    {
        my ($result, $id) = @_;
        my $json = {
            jsonrpc => "2.0",
            result => $result,
            id => $id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
    }

    sub on_verto_json
    {
        my $json = JSON->new->decode( $_[0] );
        if ($json->{method}) {
            if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
                $json->{method} eq 'verto.media') {

                my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
                my $room = $bot_matrix_rooms{$room_id};

                if ($json->{params}->{sdp}) {
                    # HACK HACK HACK HACK
                    $room->_do_POST_json( "/send/m.call.answer", {
                        call_id => $bridgestate->{$room_id}->{matrix_callid},
                        version => 0,
                        answer => {
                            sdp => $json->{params}->{sdp},
                            type => "answer",
                        },
                    })->then( sub {
                        send_verto_json_response( {
                            method => $json->{method},
                        }, $json->{id});
                    })->get;
                }
            }
            else {
                warn ("[Verto] unhandled method: " . $json->{method});
                send_verto_json_response( {
                    method => $json->{method},
                }, $json->{id});
            }
        }
        elsif ($json->{result}) {
            $requests->{$json->{id}}->done($json->{result});
        }
        elsif ($json->{error}) {
            $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
        }
    }
}
@@ -1,493 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);

binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";

my $msisdn_to_matrix = {
    '447417892400' => '@matthew:matrix.org',
};

my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
    $matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}


my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
#   https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
#     warn "Using SSL with IO::Poll causes known memory-leaks!!\n";

GetOptions(
    'C|config=s' => \my $CONFIG,
    'eval-from=s' => \my $EVAL_FROM,
) or exit 1;

if( defined $EVAL_FROM ) {
    # An emergency 'eval() this file' hack
    $SIG{HUP} = sub {
        my $code = do {
            open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
            local $/; <$fh>
        };

        eval $code or warn "Cannot eval() - $@";
    };
}

defined $CONFIG or die "Must supply --config\n";

my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };

my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;

my $bridgestate = {};
my $roomid_by_callid = {};

my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};

my $http = Net::Async::HTTP->new();
$loop->add( $http );

sub create_virtual_user
{
    my ($localpart) = @_;
    my ( $response ) = $http->do_request(
        method => "POST",
        uri => URI->new(
            $CONFIG{"matrix"}->{server}.
                "/_matrix/client/api/v1/register?".
                "access_token=$as_token&user_id=$localpart"
        ),
        content_type => "application/json",
        content => <<EOT
{
    "type": "m.login.application_service",
    "user": "$localpart"
}
EOT
    )->get;
    warn $response->as_string if ($response->code != 200);
}

my $http_server = Net::Async::HTTP::Server->new(
    on_request => sub {
        my $self = shift;
        my ( $req ) = @_;

        my $response;
        my $path = uri_decode($req->path);
        warn("request: $path");
        if ($path =~ m#/users/\@(\+.*)#) {
            # when queried about virtual users, auto-create them in the HS
            my $localpart = $1;
            create_virtual_user($localpart);
            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        elsif ($path =~ m#/transactions/(.*)#) {
            my $event = JSON->new->decode($req->body);
            print Dumper($event);

            my $room_id = $event->{room_id};
            my %dp = %{$CONFIG{'verto-dialog-params'}};
            $dp{callID} = $bridgestate->{$room_id}->{callid};

            if ($event->{type} eq 'm.room.membership') {
                my $membership = $event->{content}->{membership};
                my $state_key = $event->{state_key};
                my $room_id = $event->{state_id};

                if ($membership eq 'invite') {
                    # autojoin invites
                    my ( $response ) = $http->do_request(
                        method => "POST",
                        uri => URI->new(
                            $CONFIG{"matrix"}->{server}.
                                "/_matrix/client/api/v1/rooms/$room_id/join?".
                                "access_token=$as_token&user_id=$state_key"
                        ),
                        content_type => "application/json",
                        content => "{}",
                    )->get;
                    warn $response->as_string if ($response->code != 200);
                }
            }
            elsif ($event->{type} eq 'm.call.invite') {
                my $room_id = $event->{room_id};
                $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
                $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
                $bridgestate->{$room_id}->{sessid} = $sessid;
                # $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
                my $offer = $event->{content}->{offer}->{sdp};
                # $bridgestate->{$room_id}->{gathered_candidates} = 0;
                $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
                # no trickle ICE in verto apparently

                my $f = send_verto_json_request("verto.invite", {
                    "sdp" => $offer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            # elsif ($event->{type} eq 'm.call.candidates') {
            #     # XXX: this could fire for both matrix->verto and verto->matrix calls
            #     # and races as it collects candidates. much better to just turn off
            #     # candidate gathering in the webclient entirely for now
            #
            #     my $room_id = $event->{room_id};
            #     # XXX: compare call IDs
            #     if (!$bridgestate->{$room_id}->{gathered_candidates}) {
            #         $bridgestate->{$room_id}->{gathered_candidates} = 1;
            #         my $offer = $bridgestate->{$room_id}->{offer};
            #         my $candidate_block = "";
            #         foreach (@{$event->{content}->{candidates}}) {
            #             $candidate_block .= "a=" . $_->{candidate} . "\r\n";
            #         }
            #         # XXX: collate using the right m= line - for now assume audio call
            #         $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
            #
            #         my $f = send_verto_json_request("verto.invite", {
            #             "sdp" => $offer,
            #             "dialogParams" => \%dp,
            #             "sessid" => $bridgestate->{$room_id}->{sessid},
            #         });
            #         $self->adopt_future($f);
            #     }
            #     else {
            #         # ignore them, as no trickle ICE, although we might as well
            #         # batch them up
            #         # foreach (@{$event->{content}->{candidates}}) {
            #         #     push @{$bridgestate->{$room_id}->{candidates}}, $_;
            #         # }
            #     }
            # }
            elsif ($event->{type} eq 'm.call.answer') {
                # grab the answer and relay it to verto as a verto.answer
                my $room_id = $event->{room_id};

                my $answer = $event->{content}->{answer}->{sdp};
                my $f = send_verto_json_request("verto.answer", {
                    "sdp" => $answer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            elsif ($event->{type} eq 'm.call.hangup') {
                my $room_id = $event->{room_id};
                if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
                    my $f = send_verto_json_request("verto.bye", {
                        "dialogParams" => \%dp,
                        "sessid" => $bridgestate->{$room_id}->{sessid},
                    });
                    $self->adopt_future($f);
                }
                else {
                    warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
                }
            }
            else {
                warn "Unhandled event: $event->{type}";
            }

            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        else {
            warn "Unhandled path: $path";
            $response = HTTP::Response->new( 404 );
        }

        $req->respond( $response );
    },
);
$loop->add( $http_server );

$http_server->listen(
    addr => { family => "inet", socktype => "stream", port => 8009 },
    on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);

my $bot_verto = Net::Async::WebSocket::Client->new(
    on_frame => sub {
        my ( $self, $frame ) = @_;
        warn "[Verto] receiving $frame";
        on_verto_json($frame);
    },
);
$loop->add( $bot_verto );

my $verto_connecting = $loop->new_future;
$bot_verto->connect(
    %{ $CONFIG{"verto-bot"} },
    on_connected => sub {
        warn("[Verto] connected to websocket");
        if (not $verto_connecting->is_done) {
            $verto_connecting->done($bot_verto);

            send_verto_json_request("login", {
                'login' => $CONFIG{'verto-dialog-params'}{'login'},
                'passwd' => $CONFIG{'verto-config'}{'passwd'},
                'sessid' => $sessid,
            });
        }
    },
    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);

# die Dumper($verto_connecting);

my $as_url = $CONFIG{"matrix-bot"}->{as_url};

Future->needs_all(
    $http->do_request(
        method => "POST",
        uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
        content_type => "application/json",
        content => <<EOT
{
    "as_token": "$as_token",
    "url": "$as_url",
    "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
}
EOT
    )->then( sub{
        my ($response) = (@_);
|
||||
warn $response->as_string if ($response->code != 200);
|
||||
return Future->done;
|
||||
}),
|
||||
$verto_connecting,
|
||||
)->get;
|
||||
|
||||
$loop->attach_signal(
|
||||
PIPE => sub { warn "pipe\n" }
|
||||
);
|
||||
$loop->attach_signal(
|
||||
INT => sub { $loop->stop },
|
||||
);
|
||||
$loop->attach_signal(
|
||||
TERM => sub { $loop->stop },
|
||||
);
|
||||
|
||||
eval {
|
||||
$loop->run;
|
||||
} or my $e = $@;
|
||||
|
||||
die $e if $e;
|
||||
|
||||
exit 0;
|
||||
|
||||
{
|
||||
my $json_id;
|
||||
my $requests;
|
||||
|
||||
sub send_verto_json_request
|
||||
{
|
||||
$json_id ||= 1;
|
||||
|
||||
my ($method, $params) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
method => $method,
|
||||
params => $params,
|
||||
id => $json_id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
my $request = $loop->new_future;
|
||||
$requests->{$json_id} = $request;
|
||||
$json_id++;
|
||||
return $request;
|
||||
}
|
||||
|
||||
sub send_verto_json_response
|
||||
{
|
||||
my ($result, $id) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
result => $result,
|
||||
id => $id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
}
|
||||
|
||||
sub on_verto_json
|
||||
{
|
||||
my $json = JSON->new->decode( $_[0] );
|
||||
if ($json->{method}) {
|
||||
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
|
||||
$json->{method} eq 'verto.media') {
|
||||
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
|
||||
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
|
||||
|
||||
if ($json->{params}->{sdp}) {
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.answer?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
answer => {
|
||||
sdp => $json->{params}->{sdp},
|
||||
type => "answer",
|
||||
},
|
||||
}),
|
||||
)->then( sub {
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
}
|
||||
elsif ($json->{method} eq 'verto.invite') {
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
|
||||
|
||||
my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
|
||||
my $room_id;
|
||||
|
||||
# create a virtual user for the caller if needed.
|
||||
create_virtual_user($caller);
|
||||
|
||||
# create a room of form #peer-peer and invite the callee
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/createRoom?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
room_alias_name => $alias,
|
||||
invite => [ $callee_user ],
|
||||
}),
|
||||
)->then( sub {
|
||||
my ( $response ) = @_;
|
||||
my $resp = JSON->new->decode($response->content);
|
||||
$room_id = $resp->{room_id};
|
||||
$roomid_by_callid->{$json->{params}->{callID}} = $room_id;
|
||||
})->get;
|
||||
|
||||
# join it
|
||||
my ($response) = $http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/join/$room_id?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => '{}',
|
||||
)->get;
|
||||
|
||||
$bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
|
||||
$bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
|
||||
$bridgestate->{$room_id}->{sessid} = $sessid;
|
||||
|
||||
# put the m.call.invite in there
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.invite?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
answer => {
|
||||
sdp => $json->{params}->{sdp},
|
||||
type => "offer",
|
||||
},
|
||||
}),
|
||||
)->then( sub {
|
||||
# acknowledge the verto
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
elsif ($json->{method} eq 'verto.bye') {
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
|
||||
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
|
||||
|
||||
# put the m.call.hangup into the room
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.hangup?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
}),
|
||||
)->then( sub {
|
||||
# acknowledge the verto
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
else {
|
||||
warn ("[Verto] unhandled method: " . $json->{method});
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
}
|
||||
}
|
||||
elsif ($json->{result}) {
|
||||
$requests->{$json->{id}}->done($json->{result});
|
||||
}
|
||||
elsif ($json->{error}) {
|
||||
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
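The application service above exposes two HTTP endpoints on port 8009: user queries (`/users/...`), which lazily create virtual users on the homeserver, and `/transactions/...`, which relays call events to verto. A quick way to exercise the user-query path by hand is sketched below; the port comes from the `listen()` call above, while the MSISDN-style localpart is an invented example:

```sh
# Hypothetical smoke test of the bridge's user-query endpoint.
# Expected response: '{}' (application/json), after the bridge has
# registered the virtual user with the homeserver.
curl 'http://localhost:8009/users/@+15551234567'
```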
@@ -1,32 +0,0 @@
# Generic Matrix connection params
matrix:
  server: 'matrix.org'
  SSL: 1

# Bot-user connection details
matrix-bot:
  user_id: '@vertobot:matrix.org'
  password: ''
  domain: 'matrix.org'
  as_url: 'http://localhost:8009'
  as_token: 'vertobot123'

verto-bot:
  host: webrtc.freeswitch.org
  service: 8081
  url: "ws://webrtc.freeswitch.org:8081/"

verto-config:
  passwd: 1234

verto-dialog-params:
  useVideo: false
  useStereo: false
  tag: "webcam"
  login: "1008@webrtc.freeswitch.org"
  destination_number: "9664"
  caller_id_name: "FreeSWITCH User"
  caller_id_number: "1008"
  callID: ""
  remote_caller_id_name: "Outbound Call"
  remote_caller_id_number: "9664"
@@ -1,14 +0,0 @@
requires 'parent', 0;
requires 'Future', '>= 0.29';
requires 'Net::Async::Matrix', '>= 0.11_002';
requires 'Net::Async::Matrix::Utils';
requires 'Net::Async::WebSocket::Protocol', 0;
requires 'Data::UUID', 0;
requires 'IO::Async', '>= 0.63';
requires 'IO::Async::SSL', 0;
requires 'IO::Socket::SSL', 0;
requires 'YAML', 0;
requires 'JSON', 0;
requires 'Getopt::Long', 0;
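This is a standard `cpanfile`, so any cpanfile-aware installer can resolve the list; for example, with `cpanm` (a sketch, assuming `cpanm` is installed and run from the directory containing the cpanfile):

```sh
# Install all declared dependencies at (at least) the pinned versions,
# e.g. Future >= 0.29 and IO::Async >= 0.63.
cpanm --installdeps .
```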
@@ -1,207 +0,0 @@
# JSON is shown in *reverse* chronological order.
# Send v. Receive is implicit.

{
    "jsonrpc": "2.0",
    "id": 7,
    "result": {
        "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
        "message": "CALL ENDED",
        "causeCode": 16,
        "cause": "NORMAL_CLEARING",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    }
}

{
    "jsonrpc": "2.0",
    "method": "verto.bye",
    "params": {
        "dialogParams": {
            "useVideo": false,
            "useStereo": true,
            "tag": "webcam",
            "login": "1008@webrtc.freeswitch.org",
            "destination_number": "9664",
            "caller_id_name": "FreeSWITCH User",
            "caller_id_number": "1008",
            "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
            "remote_caller_id_name": "Outbound Call",
            "remote_caller_id_number": "9664"
        },
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 7
}

{
    "jsonrpc": "2.0",
    "id": 6,
    "result": {
        "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
        "action": "toggleHold",
        "holdState": "active",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    }
}

{
    "jsonrpc": "2.0",
    "method": "verto.modify",
    "params": {
        "action": "toggleHold",
        "dialogParams": {
            "useVideo": false,
            "useStereo": true,
            "tag": "webcam",
            "login": "1008@webrtc.freeswitch.org",
            "destination_number": "9664",
            "caller_id_name": "FreeSWITCH User",
            "caller_id_number": "1008",
            "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
            "remote_caller_id_name": "Outbound Call",
            "remote_caller_id_number": "9664"
        },
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 6
}

{
    "jsonrpc": "2.0",
    "id": 5,
    "result": {
        "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
        "action": "toggleHold",
        "holdState": "held",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    }
}

{
    "jsonrpc": "2.0",
    "method": "verto.modify",
    "params": {
        "action": "toggleHold",
        "dialogParams": {
            "useVideo": false,
            "useStereo": true,
            "tag": "webcam",
            "login": "1008@webrtc.freeswitch.org",
            "destination_number": "9664",
            "caller_id_name": "FreeSWITCH User",
            "caller_id_number": "1008",
            "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
            "remote_caller_id_name": "Outbound Call",
            "remote_caller_id_number": "9664"
        },
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 5
}

{
    "jsonrpc": "2.0",
    "id": 349819,
    "result": {
        "method": "verto.answer"
    }
}

{
    "jsonrpc": "2.0",
    "id": 349819,
    "method": "verto.answer",
    "params": {
        "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
        "sdp": "v=0\no=FreeSWITCH 1417101432 1417101433 IN IP4 209.105.235.10\ns=FreeSWITCH\nc=IN IP4 209.105.235.10\nt=0 0\na=msid-semantic: WMS jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\nm=audio 30134 RTP/SAVPF 111 126\na=rtpmap:111 opus/48000/2\na=fmtp:111 minptime=10; stereo=1\na=rtpmap:126 telephone-event/8000\na=silenceSupp:off - - - -\na=ptime:20\na=sendrecv\na=fingerprint:sha-256 F8:72:18:E9:72:89:99:22:5B:F8:B6:C6:C6:0D:C5:9B:B2:FB:BC:CA:8D:AB:13:8A:66:E1:37:38:A0:16:AA:41\na=rtcp-mux\na=rtcp:30134 IN IP4 209.105.235.10\na=ssrc:210967934 cname:rOIEajpw4FocakWY\na=ssrc:210967934 msid:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq a0\na=ssrc:210967934 mslabel:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\na=ssrc:210967934 label:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vqa0\na=ice-ufrag:OKwTmGLapwmxn7OF\na=ice-pwd:MmaMwq8rVmtWxfLbQ7U2Ew3T\na=candidate:2372654928 1 udp 659136 209.105.235.10 30134 typ host generation 0\n"
    }
}

{
    "jsonrpc": "2.0",
    "id": 4,
    "result": {
        "message": "CALL CREATED",
        "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    }
}

{
    "jsonrpc": "2.0",
    "method": "verto.invite",
    "params": {
        "sdp": "v=0\r\no=- 1381685806032722557 2 IN IP4 127.0.0.1\r\ns=-\r\nt=0 0\r\na=group:BUNDLE audio\r\na=msid-semantic: WMS 6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\nm=audio 63088 RTP/SAVPF 111 103 104 0 8 106 105 13 126\r\nc=IN IP4 81.138.8.249\r\na=rtcp:63088 IN IP4 81.138.8.249\r\na=candidate:460398169 1 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:460398169 2 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:3460887983 1 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:3460887983 2 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:945327227 1 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:945327227 2 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:1441981097 1 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:1441981097 2 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:2160789855 1 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=candidate:2160789855 2 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=ice-ufrag:cP4qeRhn0LpcpA88\r\na=ice-pwd:fREmgSkXsDLGUUH1bwfrBQhW\r\na=ice-options:google-ice\r\na=fingerprint:sha-256 AF:35:64:1B:62:8A:EF:27:AE:2B:88:2E:FE:78:29:0B:08:DA:64:6C:DE:02:57:E3:EE:B1:D7:86:B8:36:8F:B0\r\na=setup:actpass\r\na=mid:audio\r\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\na=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\na=sendrecv\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=fmtp:111 minptime=10; stereo=1\r\na=rtpmap:103 ISAC/16000\r\na=rtpmap:104 ISAC/32000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:106 CN/32000\r\na=rtpmap:105 CN/16000\r\na=rtpmap:13 CN/8000\r\na=rtpmap:126 telephone-event/8000\r\na=maxptime:60\r\na=ssrc:558827154 cname:vdKHBNqa17t2gmE3\r\na=ssrc:558827154 msid:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\na=ssrc:558827154 mslabel:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\na=ssrc:558827154 label:bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\n",
        "dialogParams": {
            "useVideo": false,
            "useStereo": true,
            "tag": "webcam",
            "login": "1008@webrtc.freeswitch.org",
            "destination_number": "9664",
            "caller_id_name": "FreeSWITCH User",
            "caller_id_number": "1008",
            "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
            "remote_caller_id_name": "Outbound Call",
            "remote_caller_id_number": "9664"
        },
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 4
}

{
    "jsonrpc": "2.0",
    "id": 3,
    "result": {
        "message": "logged in",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    }
}

{
    "jsonrpc": "2.0",
    "id": 1,
    "error": {
        "code": -32000,
        "message": "Authentication Required"
    }
}

{
    "jsonrpc": "2.0",
    "method": "login",
    "params": {
        "login": "1008@webrtc.freeswitch.org",
        "passwd": "1234",
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 3
}

{
    "jsonrpc": "2.0",
    "id": 2,
    "error": {
        "code": -32000,
        "message": "Authentication Required"
    }
}

{
    "jsonrpc": "2.0",
    "method": "login",
    "params": {
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 1
}

{
    "jsonrpc": "2.0",
    "method": "login",
    "params": {
        "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
    },
    "id": 2
}
@@ -1,31 +0,0 @@
# Creating multiple workers with a bash script

Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:

```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808$i
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```

This would create five generic workers, each with a unique `worker_name` and listening on ports 8081-8085.

Customise the script to your needs; a parameterised variation is sketched below.
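For instance, the hard-coded worker count and port numbering can be lifted into parameters. This sketch keeps the same assumptions as the script above (replication listener on 127.0.0.1:9093, log config under `/etc/matrix-synapse`); the script name and argument handling are illustrative additions:

```sh
#!/bin/bash
# Usage (hypothetical): ./make_workers.sh [count] [base_port]
COUNT="${1:-5}"
BASE_PORT="${2:-8080}"
for i in $(seq 1 "$COUNT")
do
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: $((BASE_PORT + i))
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```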
database-prepare-for-0.0.1.sh (new executable file)
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.0.1 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
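A minimal sketch of how the script might be invoked; the database filename is an illustrative assumption (the script takes it as its first argument):

```sh
# Keeps a copy as homeserver.db.bak, then rewrites homeserver.db with only
# the users, access_tokens, presence and profiles tables.
./database-prepare-for-0.0.1.sh homeserver.db
```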
debian/.gitignore (vendored)
@@ -1,7 +0,0 @@
/matrix-synapse-py3.*.debhelper
/matrix-synapse-py3.debhelper.log
/matrix-synapse-py3.substvars
/matrix-synapse-*/
/files
/debhelper-build-stamp
/.debhelper
debian/NEWS (vendored)
@@ -1,32 +0,0 @@
matrix-synapse-py3 (0.34.0) stable; urgency=medium

  matrix-synapse-py3 is intended as a drop-in replacement for the existing
  matrix-synapse package. When the package is installed, matrix-synapse will be
  automatically uninstalled. The replacement should be relatively seamless;
  however, please note the following important differences to matrix-synapse:

  * Most importantly, the matrix-synapse service now runs under Python 3 rather
    than Python 2.7.

  * Synapse is installed into its own virtualenv (in /opt/venvs/matrix-synapse)
    instead of using the system python libraries. (This may mean that you can
    remove a number of old dependencies with `apt autoremove`).

  * If you have previously manually installed any custom python extensions
    (such as matrix-synapse-rest-auth) into the system python directories, you
    will need to reinstall them in the new virtualenv. Please consult the
    documentation of the relevant extensions for further details.

  matrix-synapse-py3 will take over responsibility for the existing
  configuration files, including the matrix-synapse systemd service.

  Beware, however, that `apt purge matrix-synapse` will *disable* the
  matrix-synapse service (so that it will not be started on reboot), even
  though that service is no longer being provided by the matrix-synapse
  package. It can be re-enabled with `systemctl enable matrix-synapse`.

  The matrix.org team will continue to provide Python 2 `matrix-synapse`
  packages for the next couple of releases, to allow time for system
  administrators to test the new packages.

 -- Richard van der Hoff <richard@matrix.org>  Wed, 19 Dec 2018 14:00:00 +0000
debian/build_virtualenv (vendored)
@@ -1,134 +0,0 @@
#!/bin/bash
#
# runs dh_virtualenv to build the virtualenv in the build directory,
# and then runs the trial tests against the installed synapse.

set -e

export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs

# make sure that the virtualenv links to the specific version of python, by
# dereferencing the python3 symlink.
#
# Otherwise, if somebody tries to install (say) the stretch package on buster,
# they will get a confusing error about "No module named 'synapse'", because
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=$(readlink -e /usr/bin/python3)

# try to set the CFLAGS so any compiled C extensions are compiled with the
# most generic x64 instructions possible, so that compiling on a new Intel
# chip doesn't enable features not available on older chips or on AMD.
#
# TODO: add similar things for non-amd64, or figure out a more generic way to
# do this.

case $(dpkg-architecture -q DEB_HOST_ARCH) in
    amd64)
        export CFLAGS=-march=x86-64
        ;;
esac

# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
poetry export \
    --extras all \
    --extras test \
    --extras systemd \
    -o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"

# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
    --install-suffix "matrix-synapse" \
    --builtin-venv \
    --python "$SNAKE" \
    --upgrade-pip \
    --preinstall="lxml" \
    --preinstall="mock" \
    --preinstall="wheel" \
    --extra-pip-arg="--no-deps" \
    --extra-pip-arg="--no-cache-dir" \
    --extra-pip-arg="--compile" \
    --extras="all,systemd,test" \
    --requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

case "$DEB_BUILD_OPTIONS" in
    *nocheck*)
        # Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
        ;;

    *)
        # Copy tests to a temporary directory so that we can put them on the
        # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
        tmpdir=$(mktemp -d)
        trap 'rm -r $tmpdir' EXIT

        cp -r tests "$tmpdir"

        PYTHONPATH="$tmpdir" \
            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

        ;;
esac

# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
    --config-dir="/etc/matrix-synapse" \
    --data-dir="/var/lib/matrix-synapse" |
    perl -pe '
        # tweak the paths to the tls certs and signing keys
        /^tls_.*_path:/ and s/SERVERNAME/homeserver/;
        /^signing_key_path:/ and s/SERVERNAME/homeserver/;

        # tweak the pid file location
        /^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;

        # tweak the path to the log config
        /^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;

        # tweak the path to the media store
        /^media_store_path:/ and s#/media_store#/media#;

        # remove the server_name setting, which is set in a separate file
        /^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";

        # remove the report_stats setting, which is set in a separate file
        /^# report_stats:/ and $_ = "";

    ' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"

# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
    --output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"

# add a dependency on the right version of python to substvars.
PYPKG=$(basename "$SNAKE")
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars


# add a couple of triggers. This is needed so that dh-virtualenv can rebuild
# the venv when the system python changes (see
# https://dh-virtualenv.readthedocs.io/en/latest/tutorial.html#step-2-set-up-packaging-for-your-project)
#
# we do it here rather than the more conventional way of just adding it to
# debian/matrix-synapse-py3.triggers, because we need to add a trigger on the
# right version of python.
cat >>"debian/.debhelper/generated/matrix-synapse-py3/triggers" <<EOF
# triggers for dh-virtualenv
interest-noawait $SNAKE
interest dh-virtualenv-interpreter-update

EOF
debian/changelog (vendored)
File diff suppressed because it is too large (1763 lines)
debian/clean (vendored)
@@ -1 +0,0 @@
exported_requirements.txt
debian/compat (vendored)
@@ -1 +0,0 @@
10
debian/control (vendored)
@@ -1,44 +0,0 @@
Source: matrix-synapse-py3
Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
 debhelper (>= 10),
 dh-virtualenv (>= 1.1),
 libsystemd-dev,
 libpq-dev,
 lsb-release,
 python3-dev,
 python3,
 python3-setuptools,
 python3-pip,
 python3-venv,
 tar,
Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse

Package: matrix-synapse-py3
Architecture: any
Provides: matrix-synapse
Conflicts:
 matrix-synapse (<< 0.34.0.1-0matrix2),
 matrix-synapse (>= 0.34.0.1-1),
Pre-Depends: dpkg (>= 1.16.1)
Depends:
 adduser,
 debconf,
 ${misc:Depends},
 ${shlibs:Depends},
 ${synapse:pydepends},
# some of our scripts use perl, but none of them are important,
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
 ${shlibs1:Recommends},
Suggests:
 sqlite3,
 ${perl:Depends},
Description: Open federated Instant Messaging and VoIP server
 Matrix is an ambitious new ecosystem for open federated Instant
 Messaging and VoIP. Synapse is a reference Matrix server
 implementation.
debian/copyright (vendored)
@@ -1,95 +0,0 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: synapse
Source: https://github.com/matrix-org/synapse

Files: *
Copyright: 2014-2017, OpenMarket Ltd, 2017-2018 New Vector Ltd
License: Apache-2.0

Files: synapse/config/saml2.py
Copyright: 2015, Ericsson
License: Apache-2.0

Files: synapse/config/jwt.py
Copyright: 2015, Niklas Riekenbrauck
License: Apache-2.0

Files: synapse/config/workers.py
Copyright: 2016, matrix.org
License: Apache-2.0

Files: synapse/config/repository.py
Copyright: 2014-2015, matrix.org
License: Apache-2.0

Files: debian/*
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
           2017, Rahul De <rahulde@swecha.net>
           2017, Sunil Mohan Adapa <sunil@medhas.org>
License: Apache-2.0

License: Apache-2.0
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 .
 http://www.apache.org/licenses/LICENSE-2.0
 .
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 .
 On Debian systems, the full text of the Apache License version
 2.0 can be found in the file
 `/usr/share/common-licenses/Apache-2.0'.

License: BSD-3-clause
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 .
 Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following
 disclaimer. Redistributions in binary form must reproduce the above
 copyright notice, this list of conditions and the following
 disclaimer in the documentation and/or other materials provided with
 the distribution.
 .
 Neither the name of the author nor the names of its contributors may
 be used to endorse or promote products derived from this software
 without specific prior written permission.
 .
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

License: Expat
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:
 .
 The above copyright notice and this permission notice shall be
 included in all copies or substantial portions of the Software.
 .
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
debian/dirs (vendored)
@@ -1,3 +0,0 @@
etc/matrix-synapse
var/lib/matrix-synapse
var/log/matrix-synapse
debian/hash_password.1 (vendored)
@@ -1,58 +0,0 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "HASH_PASSWORD" "1" "July 2021" "" ""
.SH "NAME"
\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
.SH "SYNOPSIS"
\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
.SH "DESCRIPTION"
\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
.P
\fBhash_password\fR takes a password as a parameter, either on the command line or from \fBSTDIN\fR if not supplied\.
.P
It accepts a YAML file which can be used to specify parameters like the number of rounds for bcrypt, and a password_config section giving the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
.P
The hashed password is written to \fBSTDOUT\fR\.
.SH "FILES"
A sample YAML file accepted by \fBhash_password\fR is described below:
.P
.nf
bcrypt_rounds: 17
password_config:
   pepper: "random hashing pepper"
.fi
.SH "OPTIONS"
.TP
\fB\-p\fR, \fB\-\-password\fR
Read the password from the command line if [password] is supplied\. If not, prompt the user and read the password from \fBSTDIN\fR\. It is not recommended to type the password on the command line directly; use \fBSTDIN\fR instead\.
.TP
\fB\-c\fR, \fB\-\-config\fR
Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
.SH "EXAMPLES"
Hash from the command line:
.IP "" 4
.nf
$ hash_password \-p "p@ssw0rd"
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
.fi
.IP "" 0
.P
Hash from STDIN:
.IP "" 4
.nf
$ hash_password
Password:
Confirm password:
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
.fi
.IP "" 0
.P
Using a config file:
.IP "" 4
.nf
$ hash_password \-c config\.yml
Password:
Confirm password:
$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
.fi
.IP "" 0
.SH "COPYRIGHT"
This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for the Debian GNU/Linux distribution\.
.SH "SEE ALSO"
synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
debian/hash_password.ronn (vendored)
@@ -1,69 +0,0 @@
hash_password(1) -- Calculate the hash of a new password, so that passwords can be reset
========================================================================================

## SYNOPSIS

`hash_password` [`-p`|`--password` [password]] [`-c`|`--config` <file>]

## DESCRIPTION

**hash_password** calculates the hash of a supplied password using bcrypt.

`hash_password` takes a password as a parameter, either on the command line
or from `STDIN` if not supplied.

It accepts a YAML file which can be used to specify parameters like the
number of rounds for bcrypt, and a password_config section giving the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **10**.

The hashed password is written to `STDOUT`.

## FILES

A sample YAML file accepted by `hash_password` is described below:

    bcrypt_rounds: 17
    password_config:
      pepper: "random hashing pepper"

## OPTIONS

  * `-p`, `--password`:
    Read the password from the command line if [password] is supplied.
    If not, prompt the user and read the password from `STDIN`.
    It is not recommended to type the password on the command line
    directly; use `STDIN` instead.

  * `-c`, `--config`:
    Read the supplied YAML <file> containing the options `bcrypt_rounds`
    and the `password_config` section containing the `pepper` value.

## EXAMPLES

Hash from the command line:

    $ hash_password -p "p@ssw0rd"
    $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe

Hash from STDIN:

    $ hash_password
    Password:
    Confirm password:
    $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG

Using a config file:

    $ hash_password -c config.yml
    Password:
    Confirm password:
    $2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O

## COPYRIGHT

This man page was written by Rahul De <<rahulde@swecha.net>>
for the Debian GNU/Linux distribution.

## SEE ALSO

synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
debian/install (vendored)
@@ -1 +0,0 @@
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
debian/manage_debconf.pl (vendored)
@@ -1,130 +0,0 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
#   manage_debconf.pl <action>
#
# where <action> can be:
#
#   read:   read the configuration from the yaml into debconf
#   update: update the yaml config according to the debconf database
use strict;
use warnings;

use Debconf::Client::ConfModule (qw/get set/);

# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
    server_name => 'matrix-synapse/server-name',
    report_stats => 'matrix-synapse/report-stats',
);

# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};

sub read_config {
    my @files = @_;

    foreach my $file (@files) {
        print STDERR "reading $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        # rudimentary parsing which (a) avoids having to depend on a yaml library,
        # and (b) is tolerant of yaml errors
        while($_ = <$FH>) {
            while (my ($setting, $debconf) = each %MAPPINGS) {
                $setting = quotemeta $setting;
                if(/^${setting}\s*:(.*)$/) {
                    my $val = $1;

                    # remove leading/trailing whitespace
                    $val =~ s/^\s*//;
                    $val =~ s/\s*$//;

                    # remove surrounding quotes
                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
                        $val = $1;
                    }

                    print STDERR ">> $debconf = $val\n" if $DEBUG;
                    set($debconf, $val);
                }
            }
        }
        close $FH;
    }
}

sub update_config {
    my @files = @_;

    my %substs = ();
    while (my ($setting, $debconf) = each %MAPPINGS) {
        my @res = get($debconf);
        $substs{$setting} = $res[1] if $res[0] == 0;
    }

    foreach my $file (@files) {
        print STDERR "checking $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        my $updated = 0;

        # read the whole file into memory
        my @lines = <$FH>;

        while (my ($setting, $val) = each %substs) {
            $setting = quotemeta $setting;

            map {
                if (/^${setting}\s*:\s*(.*)\s*$/) {
                    my $current = $1;
                    if ($val ne $current) {
                        $_ = "${setting}: $val\n";
                        $updated = 1;
                    }
                }
            } @lines;
        }
        close $FH;

        next unless $updated;

        print STDERR "updating $file\n" if $DEBUG;
        open $FH, ">", $file or die "unable to update $file";
        print $FH @lines;
        close $FH;
    }
}


my $cmd = $ARGV[0];

my $read = 0;
my $update = 0;

if (not $cmd) {
    die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
    $read = 1;
} elsif ($cmd eq 'update') {
    $update = 1;
} else {
    die "unknown command '$cmd'\n";
}

my @files = (
    "/etc/matrix-synapse/homeserver.yaml",
    glob("/etc/matrix-synapse/conf.d/*.yaml"),
);

if ($read) {
    read_config(@files);
} elsif ($update) {
    update_config(@files);
}
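For reference, the two actions documented in the script's header are driven by the package's maintainer scripts once the file is installed to the destination given in `debian/install`: the `.config` script runs `read` and the postinst runs `update`, as below.

```sh
# yaml -> debconf: seed the debconf answers from the existing config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read

# debconf -> yaml: rewrite server_name / report_stats in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
```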
debian/manpages (vendored)
@@ -1,5 +0,0 @@
debian/hash_password.1
debian/register_new_matrix_user.1
debian/synapse_port_db.1
debian/synapse_review_recent_signups.1
debian/synctl.1
debian/matrix-synapse-py3.config (vendored)
@@ -1,17 +0,0 @@
#!/bin/sh

set -e

# shellcheck disable=SC1091
. /usr/share/debconf/confmodule

# try to update the debconf db according to whatever is in the config files
#
# note that we may get run during preconfiguration, in which case the script
# will not yet be installed.
[ -x /opt/venvs/matrix-synapse/lib/manage_debconf.pl ] && \
    /opt/venvs/matrix-synapse/lib/manage_debconf.pl read

db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go
debian/matrix-synapse-py3.links (vendored)
@@ -1,6 +0,0 @@
opt/venvs/matrix-synapse/bin/hash_password usr/bin/hash_password
opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matrix_user
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
opt/venvs/matrix-synapse/bin/update_synapse_database usr/bin/update_synapse_database
debian/matrix-synapse-py3.postinst (vendored)
@@ -1,57 +0,0 @@
#!/bin/sh -e

# shellcheck disable=SC1091
. /usr/share/debconf/confmodule

CONFIGFILE_SERVERNAME="/etc/matrix-synapse/conf.d/server_name.yaml"
CONFIGFILE_REPORTSTATS="/etc/matrix-synapse/conf.d/report_stats.yaml"
USER="matrix-synapse"

case "$1" in
  configure|reconfigure)

    # generate template config files if they don't exist
    mkdir -p "/etc/matrix-synapse/conf.d/"
    if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
      cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
    fi

    if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
      cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

# Whether to report homeserver usage statistics.
report_stats: false
EOF
    fi

    # update the config files according to whatever is in the debconf database
    /opt/venvs/matrix-synapse/lib/manage_debconf.pl update

    if ! getent passwd $USER >/dev/null; then
      adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
    fi

    for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
      if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
        dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
      fi
    done

    ;;
esac

#DEBHELPER#

exit 0
Some files were not shown because too many files have changed in this diff.