
Compare commits


9 Commits

Author SHA1 Message Date
Mark Haines 3a676b8ee3 More merging 2016-04-21 16:28:05 +01:00
Mark Haines 0d5622b088 Merge branch 'markjh/slave_event_push_actions' into markjh/split_pusher 2016-04-21 16:24:38 +01:00
Mark Haines 712030aeef Merge branch 'develop' into markjh/split_pusher 2016-04-21 16:21:49 +01:00
Mark Haines 0b282d33af Add an HTTP API for removing rejected pushers. 2016-04-19 14:43:47 +01:00

    When a push is rejected by the push gateway then synapse needs to
    remove the pusher from the database. However, we probably don't want
    to do that directly from the slave, so we add an HTTP API to synapse
    to remove the pusher from the database. (An illustrative sketch of
    such a call follows this list.)

Mark Haines 03c8df54f0 Invalidate the receipt cache correctly 2016-04-14 17:25:27 +01:00
Mark Haines c214d3e36e Merge branch 'develop' into markjh/split_pusher 2016-04-14 17:00:40 +01:00
Mark Haines 1c1b2de975 Poke the slaved pushers on new receipts 2016-04-14 16:59:56 +01:00
Mark Haines f41b1a8723 Make push sort of work 2016-04-14 13:30:57 +01:00
Mark Haines 1209d3174e Optionally split out the pusher into a separate process 2016-04-14 11:20:48 +01:00
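Purely as an illustration of the internal call described in commit 0b282d33af (the endpoint path and JSON fields below are hypothetical, not taken from this changeset):

    # Hypothetical sketch: a pusher worker asking the main synapse process
    # to delete a pusher that the push gateway has rejected.
    curl -X POST http://localhost:8008/_synapse/replication/remove_pusher \
        -H 'Content-Type: application/json' \
        -d '{"app_id": "com.example.app", "pushkey": "abcdef", "user_id": "@alice:my.matrix.host"}'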
650 changed files with 21702 additions and 84776 deletions

View File

@@ -1,159 +0,0 @@
version: 2
jobs:
dockerhubuploadrelease:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:$CIRCLE_TAG
dockerhubuploadlatest:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_SHA1 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker tag matrixdotorg/synapse:$CIRCLE_SHA1 matrixdotorg/synapse:latest
- run: docker push matrixdotorg/synapse:$CIRCLE_SHA1
- run: docker push matrixdotorg/synapse:latest
sytestpy2:
machine: true
steps:
- checkout
- run: docker pull matrixdotorg/sytest-synapsepy2
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy2postgres:
machine: true
steps:
- checkout
- run: docker pull matrixdotorg/sytest-synapsepy2
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy2merged:
machine: true
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: docker pull matrixdotorg/sytest-synapsepy2
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy2postgresmerged:
machine: true
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: docker pull matrixdotorg/sytest-synapsepy2
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy3:
machine: true
steps:
- checkout
- run: docker pull matrixdotorg/sytest-synapsepy3
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy3postgres:
machine: true
steps:
- checkout
- run: docker pull matrixdotorg/sytest-synapsepy3
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy3merged:
machine: true
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: docker pull matrixdotorg/sytest-synapsepy3
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
sytestpy3postgresmerged:
machine: true
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: docker pull matrixdotorg/sytest-synapsepy3
- run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
- store_artifacts:
path: ~/project/logs
destination: logs
- store_test_results:
path: logs
workflows:
version: 2
build:
jobs:
- sytestpy2:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy2postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- dockerhubuploadrelease:
filters:
tags:
only: /^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/
branches:
ignore: /.*/
- dockerhubuploadlatest:
filters:
branches:
only: master

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -e
# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
echo "Can't figure out what the PR number is!"
exit 1
fi
# Get the reference, using the GitHub API
GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
# Show what we are before
git show -s
# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE
# Show what we are after.
git show -s

View File

@@ -1,7 +0,0 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.git/*
.tox/*

View File

@@ -1,48 +0,0 @@
<!--
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
Describe here the problem that you are experiencing, or the feature you are requesting.
### Steps to reproduce
- For bugs, list the steps
- that reproduce the bug
- using hyphens as bullet points
Describe how what happens differs from what you expected.
<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly. -->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
If not matrix.org:
- **Version**: What version of Synapse is running? <!--
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-->
- **Install method**: package manager/git clone/pip
- **Platform**: Tell us about the environment in which your homeserver is operating
- distro, hardware, if it's running in a vm/container, etc.

19
.gitignore vendored
View File

@@ -1,11 +1,8 @@
*.pyc
.*.swp
*~
*.lock
.DS_Store
_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
@@ -16,7 +13,6 @@ docs/build/
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
homeserver*.yaml
@@ -28,15 +24,14 @@ homeserver*.yaml
.coverage
htmlcov
demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/*.db
demo/*.log
demo/*.log.*
demo/*.pid
demo/media_store.*
demo/etc
uploads
cache
.idea/
media_store/
@@ -44,9 +39,6 @@ media_store/
*.tac
build/
venv/
venv*/
*venv/
localhost-800*/
static/client/register/register_config.js
@@ -54,6 +46,3 @@ static/client/register/register_config.js
env/
*.config
.vscode/
.ropeproject/

View File

@@ -1,52 +0,0 @@
sudo: false
language: python
# tell travis to cache ~/.cache/pip
cache: pip
before_script:
- git remote set-branches --add origin develop
- git fetch origin develop
matrix:
fast_finish: true
include:
- python: 2.7
env: TOX_ENV=packaging
- python: 2.7
env: TOX_ENV=pep8
- python: 2.7
env: TOX_ENV=py27
- python: 2.7
env: TOX_ENV=py27-old
- python: 2.7
env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
services:
- postgresql
- python: 3.5
env: TOX_ENV=py35
- python: 3.6
env: TOX_ENV=py36
- python: 3.6
env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
services:
- postgresql
- python: 3.6
env: TOX_ENV=check_isort
- python: 3.6
env: TOX_ENV=check-newsfragment
install:
- pip install tox
script:
- tox -e $TOX_ENV

View File

@@ -60,9 +60,3 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix

2782
CHANGES.md

File diff suppressed because it is too large.

1031
CHANGES.rst Normal file

File diff suppressed because it is too large.

View File

@@ -30,28 +30,8 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
keep an eye on the pull request for feedback.
To run unit tests in a local development environment, you can use:
- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
SQLite-backed Synapse on Python 2.7.
- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.
We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested by Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please lurk there and keep an eye open.
Code style
~~~~~~~~~~
@@ -64,27 +44,6 @@ Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged; we value
your contributions and would like to have you shouted out in the
release notes!
For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
Attribution
~~~~~~~~~~~
@@ -93,8 +52,7 @@ AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and
we'll try to fix it :)
from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
Sign off
~~~~~~~~
@@ -143,27 +101,18 @@ the contribution or otherwise have the right to contribute it to Matrix::
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.
Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
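In practice that looks something like the following (a sketch using standard git commands; the name and address are placeholders):

.. code:: bash

    git config --global user.name "Jane Matrix"
    git config --global user.email jane@example.org
    git commit -s -m "Validate Florb security levels on federation"

The ``-s`` flag appends the ``Signed-off-by:`` line built from those two config values.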
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!

View File

@@ -2,7 +2,6 @@ include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
@@ -12,10 +11,8 @@ recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py
recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include synapse/static *.css
@@ -25,16 +22,5 @@ recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
recursive-exclude jenkins *.sh
include pyproject.toml
recursive-include changelog.d *
prune .github
prune demo/etc
prune docker
prune .circleci

File diff suppressed because it is too large.

View File

@@ -5,60 +5,30 @@ Before upgrading check if any special steps are required to upgrade from
what you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.
1. If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
run:
.. code:: bash
source ~/.synapse/bin/activate
2. If synapse was installed using pip then upgrade to the latest version by
running:
.. code:: bash
pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
# restart synapse
synctl restart
If synapse was installed using git then upgrade to the latest version by
running:
.. code:: bash
# Pull the latest version of the master branch.
git pull
# Update the versions of synapse's python dependencies.
python synapse/python_dependencies.py | xargs pip install --upgrade
# restart synapse
./synctl restart
To check whether your update was successful, you can check the Server header
returned by the Client-Server API:
If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
.. code:: bash
# replace <host.name> with the hostname of your synapse homeserver.
# You may need to specify a port (eg, :8448) if your server is not
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
source ~/.synapse/bin/activate
Upgrading to $NEXT_VERSION
==========================
If synapse was installed using pip then upgrade to the latest version by
running:
This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and CPU use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
.. code:: bash
pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
If synapse was installed using git then upgrade to the latest version by
running:
.. code:: bash
# Pull the latest version of the master branch.
git pull
# Update the versions of synapse's python dependencies.
python synapse/python_dependencies.py | xargs -n1 pip install
We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.
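To make the optional module available, installing it into the same environment as synapse should suffice (a sketch, assuming a pip-managed install):

.. code:: bash

    pip install psutil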
Upgrading to v0.15.0
====================
@@ -98,7 +68,7 @@ It has been replaced by specifying a list of application service registrations i
``homeserver.yaml``::
app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
Where ``registration-01.yaml`` looks like::
url: <String> # e.g. "https://my.application.service.com"
@@ -187,7 +157,7 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of and room alias mappings.
@@ -196,18 +166,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is,
unfortunately, non-trivial and requires human intervention to resolve any
resulting conflicts during the upgrade process.
Before running the command the homeserver should be first completely
Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:
./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in the local HS will
message to a room that the homeserver was previously in the local HS will
automatically rejoin the room.
Upgrading to v0.4.0
@@ -266,7 +236,7 @@ automatically generate default config use::
--config-path homeserver.config \
--generate-config
This config can be edited if desired, for example to specify a different SSL
This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::
$ python synapse/app/homeserver.py --config-path homeserver.config
@@ -287,20 +257,20 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of and room alias mappings.
Before running the command the homeserver should be first completely
Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:
./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in the local HS will
message to a room that the homeserver was previously in the local HS will
automatically rejoin the room.

View File

@@ -1 +0,0 @@
!.gitignore

View File

@@ -1 +0,0 @@
Fix problem when playing media from Chrome using direct URL (thanks @remjey!)

View File

@@ -1,2 +0,0 @@
Unit tests can now be run under PostgreSQL in Docker using
``test_postgresql.sh``.

View File

@@ -1 +0,0 @@
Speed up calculation of typing updates for replication

View File

@@ -1 +0,0 @@
support registering regular users non-interactively with register_new_matrix_user script

View File

@@ -1 +0,0 @@
Fix broken invite email links for self hosted riots

View File

@@ -1,2 +0,0 @@
Remove documentation regarding installation on Cygwin, the use of WSL is
recommended instead.

View File

@@ -1 +0,0 @@
Don't ratelimit autojoins

View File

@@ -1 +0,0 @@
Add the ability to change MAX_UPLOAD_SIZE via the docker container's environment variables.

View File

@@ -1 +0,0 @@
Fix 500 error when deleting unknown room alias

View File

@@ -1 +0,0 @@
Fix some b'abcd' noise in logs and metrics

View File

@@ -1 +0,0 @@
Report "python_version" in the phone home stats

View File

@@ -1 +0,0 @@
Fix some b'abcd' noise in logs and metrics

View File

@@ -1 +0,0 @@
Fix typo in README, synaspse -> synapse

View File

@@ -1 +0,0 @@
When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers.

View File

@@ -1 +0,0 @@
Increase the timeout when filling missing events in federation requests

View File

@@ -1 +0,0 @@
Improve the logging when handling a federation transaction

View File

@@ -1 +0,0 @@
Improve logging of outbound federation requests

View File

@@ -1 +0,0 @@
Fix incorrect server-name indication for outgoing federation requests

View File

@@ -1 +0,0 @@
Fix adding client IPs to the database failing on Python 3.

View File

@@ -1 +0,0 @@
Improve logging of outbound federation requests

View File

@@ -1 +0,0 @@
Fix bug where things occasionally were not being timed out correctly.

View File

@@ -1 +0,0 @@
Fix the docker image building on python 3

View File

@@ -1 +0,0 @@
Add a regression test for logging failed HTTP requests on Python 3.

View File

@@ -1 +0,0 @@
Fix bug where outbound federation would stop talking to some servers when using workers

View File

@@ -1 +0,0 @@
Always LL ourselves if we're in a room

View File

@@ -1 +0,0 @@
Comments and interface cleanup for on_receive_pdu

View File

@@ -1 +0,0 @@
Fix spurious exceptions when remote http client closes connection

View File

@@ -1 +0,0 @@
Log exceptions thrown by background tasks

View File

@@ -1 +0,0 @@
Fix some instances of ExpiringCache not expiring cache items

View File

@@ -1 +0,0 @@
Fix out-of-bounds error when LLing yourself

View File

@@ -1 +0,0 @@
Automate pushes to docker hub

View File

@@ -1 +0,0 @@
Require attrs 16.0.0 or later

View File

@@ -1 +0,0 @@
Fix incompatibility with python3 on alpine

View File

@@ -1 +0,0 @@
Run the test suite on the oldest supported versions of our dependencies in CI.

View File

@@ -1 +0,0 @@
Fix exceptions from metrics handler

View File

@@ -1 +0,0 @@
CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches.

View File

@@ -1 +0,0 @@
Fix docstrings and add tests for state store methods

View File

@@ -1 +0,0 @@
Include eventid in log lines when processing incoming federation transactions

View File

@@ -1 +0,0 @@
Fix errors due to concurrent monthly_active_user upserts

View File

@@ -1 +0,0 @@
fix docstring for FederationClient.get_state_for_room

View File

@@ -1 +0,0 @@
Run notify_app_services as a bg process

View File

@@ -1 +0,0 @@
Improve the logging when handling a federation transaction

View File

@@ -1 +0,0 @@
Clarifications in FederationHandler

View File

@@ -1 +0,0 @@
Replaced all occurrences of e.message with str(e). Contributed by Schnuffle

View File

@@ -1,10 +0,0 @@
Community Contributions
=======================
Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.
Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.

View File

@@ -32,7 +32,7 @@ import urlparse
import nacl.signing
import nacl.encoding
from signedjson.sign import verify_signed_json, SignatureVerifyException
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"

View File

@@ -36,13 +36,15 @@ class HttpClient(object):
the request body. This will be encoded as JSON.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
def get_json(self, url, args=None):
""" Gets some json from the given host homeserver and path
""" Get's some json from the given host homeserver and path
Args:
url (str): The URL to GET data from.
@@ -52,8 +54,10 @@ class HttpClient(object):
and *not* a string.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
@@ -210,4 +214,4 @@ class _JsonProducer(object):
pass
def stopProducing(self):
pass
pass

View File

@@ -1,41 +0,0 @@
# Synapse Docker
### Automated configuration
It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.
Read the section about environment variables and set at least the mandatory variables,
then run the server:
```
docker-compose up -d
```
If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.
### Manual configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```
Then, customize your configuration and run the server:
```
docker-compose up -d
```
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)

View File

@@ -1,50 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.
version: '3'
services:
synapse:
build: ../..
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry connecting to the database, restart upon
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
environment:
- SYNAPSE_SERVER_NAME=my.matrix.host
- SYNAPSE_REPORT_STATS=no
- SYNAPSE_ENABLE_REGISTRATION=yes
- SYNAPSE_LOG_LEVEL=INFO
- POSTGRES_PASSWORD=changeme
volumes:
# You may either store all the files in a local folder
- ./files:/data
# .. or you may split this between different storage points
# - ./files:/data
# - /path/to/ssd:/data/uploads
# - /path/to/large_hdd:/data/media
depends_on:
- db
# In order to expose Synapse, remove one of the following; you might, for
# instance, expose the TLS port directly:
ports:
- 8448:8448/tcp
# ... or use a reverse proxy, here is an example for traefik:
labels:
- traefik.enable=true
- traefik.frontend.rule=Host:my.matrix.Host
- traefik.port=8448
db:
image: docker.io/postgres:10-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data
# .. or store them on some high performance storage for better results
# - /path/to/ssd/storage:/var/lib/postgresql/data

View File

@@ -1,50 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.
version: 1
formatters:
fmt:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
# example output to console
console:
class: logging.StreamHandler
filters: [context]
# example output to file - to enable, edit 'root' config below.
file:
class: logging.handlers.RotatingFileHandler
formatter: fmt
filename: /var/log/synapse/homeserver.log
maxBytes: 100000000
backupCount: 3
filters: [context]
root:
level: INFO
handlers: [console] # to use file handler instead, switch to [file]
loggers:
synapse:
level: INFO
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
# example of enabling debugging for a component:
#
# synapse.federation.transport.server:
# level: DEBUG

View File

@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules
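For step 3, a sketch of the `prometheus.yml` stanza that loads the recording rules shipped alongside the Prometheus config in this tree (the path is a placeholder):

```
rule_files:
  - "/PATH/TO/synapse-v2.rules"
```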

File diff suppressed because it is too large.

View File

@@ -22,8 +22,6 @@ import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from six import string_types
def make_graph(file_name, room_id, file_prefix, limit):
print "Reading lines"
@@ -60,7 +58,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, string_types):
elif isinstance(value, basestring):
pass
else:
value = json.dumps(value)

View File

@@ -1,37 +0,0 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.
To use it, first install prometheus by following the instructions at
http://prometheus.io/
### for Prometheus v1
Add a new job to the main prometheus.conf file:
job: {
name: "synapse"
target_group: {
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
}
}
### for Prometheus v2
Add a new job to the main prometheus.yml file:
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
# when endpoint uses https:
scheme: "https"
static_configs:
- targets: ['SERVER.LOCATION:PORT']
To use `synapse.rules` add
rule_files:
- "/PATH/TO/synapse-v2.rules"
Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.
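A minimal sketch of the config-file route (``enable_metrics`` is an assumption about the option's spelling; check the sample config for your synapse version):

  enable_metrics: true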

View File

@@ -1,395 +0,0 @@
{{ template "head" . }}
{{ template "prom_content_head" . }}
<h1>System Resources</h1>
<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resource_utime"),
expr: "rate(process_cpu_seconds_total[2m]) * 100",
name: "[[job]]",
min: 0,
max: 100,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "%",
yTitle: "CPU Usage"
})
</script>
<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resource_maxrss"),
expr: "process_psutil_rss:max",
name: "Maxrss",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "bytes",
yTitle: "Usage"
})
</script>
<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_fds"),
expr: "process_open_fds{job='synapse'}",
name: "FDs",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "",
yTitle: "Descriptors"
})
</script>
<h1>Reactor</h1>
<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_total_time"),
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
name: "time",
max: 1,
min: 0,
renderer: "area",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_average_time"),
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
name: "time",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
})
</script>
<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_pending_calls"),
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
name: "calls",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yTitle: "Pending Cals"
})
</script>
<h1>Storage</h1>
<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_query_time"),
expr: "rate(synapse_storage_query_time:count[2m])",
name: "[[verb]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "queries/s",
yTitle: "Queries"
})
</script>
<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transaction_time"),
expr: "rate(synapse_storage_transaction_time:count[2m])",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "txn/s",
yTitle: "Transactions"
})
</script>
<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transactions_time_msec"),
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_schedule_time"),
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
name: "Total latency",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_ratio"),
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
name: "[[name]]",
min: 0,
max: 100,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "%",
yTitle: "Percentage"
})
</script>
<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_size"),
expr: "synapse_util_caches_cache:size",
name: "[[name]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Items"
})
</script>
<h1>Requests</h1>
<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_request_count:servlet[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h4>&nbsp;(without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses"),
expr: "rate(synapse_http_server_responses[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses_err"),
expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "CPU Usage"
})
</script>
<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "DB Usage"
})
</script>
<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h1>Federation</h1>
<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_client_sent"),
expr: "rate(synapse_federation_client_sent[2m])",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_server_received"),
expr: "rate(synapse_federation_server_received[2m])",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_transaction_queue_pending"),
expr: "synapse_federation_transaction_queue_pending",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Units"
})
</script>
<h1>Clients</h1>
<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_listeners"),
expr: "synapse_notifier_listeners",
name: "listeners",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Listeners"
})
</script>
<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_notified_events"),
expr: "rate(synapse_notifier_notified_events[2m])",
name: "events",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "events/s",
yTitle: "Event rate"
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}

View File

@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0

View File

@@ -1,60 +0,0 @@
groups:
- name: synapse
rules:
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
expr: 'synapse_federation_transaction_queue_pending_edus + 0'
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'

View File

@@ -1,10 +1,7 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.
# **NOTE:** This is an example service file that may change in the future. If you
wish to use this, please copy rather than symlink it.
[Unit]
Description=Synapse Matrix homeserver
@@ -13,10 +10,7 @@ Type=simple
User=synapse
Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
[Install]
WantedBy=multi-user.target
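A sketch of installing the unit (the source path is a placeholder for wherever you keep this file; the unit name is an assumption):

    cp /path/to/synapse.service /etc/systemd/system/synapse.service
    systemctl daemon-reload
    systemctl enable --now synapse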

View File

@@ -1,42 +0,0 @@
ARG PYTHON_VERSION=2
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
COPY . /synapse
RUN apk add --no-cache --virtual .build_deps \
build-base \
libffi-dev \
libjpeg-turbo-dev \
libressl-dev \
libxslt-dev \
linux-headers \
postgresql-dev \
zlib-dev \
&& cd /synapse \
&& apk add --no-cache --virtual .runtime_deps \
libffi \
libjpeg-turbo \
libressl \
libxslt \
libpq \
zlib \
su-exec \
&& pip install --upgrade \
lxml \
pip \
psycopg2 \
setuptools \
&& mkdir -p /synapse/cache \
&& pip install -f /synapse/cache --upgrade --process-dependency-links . \
&& mv /synapse/docker/start.py /synapse/docker/conf / \
&& rm -rf \
setup.cfg \
setup.py \
synapse \
&& apk del .build_deps
VOLUME ["/data"]
EXPOSE 8008/tcp 8448/tcp
ENTRYPOINT ["/start.py"]

View File

@@ -1,12 +0,0 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:latest
# The Sytest image doesn't come with python, so install that
RUN apt-get -qq install -y python python-dev python-pip
# We need tox to run the tests in run_pg_tests.sh
RUN pip install tox
ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh

View File

@@ -1,125 +0,0 @@
# Synapse Docker
This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server; you should run these separately.
## Run
We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.
### Using docker-compose (easier)
This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.
An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker)
section of the synapse project for examples.
### Without Compose (harder)
If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.
```
docker run \
-d \
--name synapse \
-v ${DATA_PATH}:/data \
-e SYNAPSE_SERVER_NAME=my.matrix.host \
-e SYNAPSE_REPORT_STATS=yes \
docker.io/matrixdotorg/synapse:latest
```
## Volumes
The image expects a single volume, located at ``/data``, that will hold:
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.
In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service YAML
configuration file there. Multiple application services are supported.
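A minimal sketch of such a registration file (every identifier and token below is a placeholder, not a value from this repository):

```
# /data/appservices/mybridge.yaml (hypothetical example)
id: mybridge
url: "http://mybridge.local:9000"   # where synapse reaches the appservice
as_token: "CHANGE_ME_AS_TOKEN"      # token the appservice presents to synapse
hs_token: "CHANGE_ME_HS_TOKEN"      # token synapse presents to the appservice
sender_localpart: mybridge_bot
namespaces:
  users:
    - exclusive: true
      regex: "@mybridge_.*:my.matrix.host"
  aliases: []
  rooms: []
```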
## Environment
Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.
Global settings:
* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.
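A sketch of generating that file with plain Docker, mirroring the ``generate`` invocation shown in the contrib compose notes earlier in this changeset (``${DATA_PATH}`` is the host directory you mount at ``/data``):

```
docker run -it --rm \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest generate
```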
Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:
* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enable anonymous
statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
URIs to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].
Shared secrets, which will be initialized to random values if not set:
* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users when
registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
to the server.
Database specific values (will use SQLite if not set):
* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
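For example, pointing the container at a Postgres instance reachable as `db` would look roughly like this (a sketch; all values are placeholders):

```
docker run -d --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e POSTGRES_HOST=db \
    -e POSTGRES_USER=synapse \
    -e POSTGRES_PASSWORD=changeme \
    docker.io/matrixdotorg/synapse:latest
```

As noted above, setting `POSTGRES_PASSWORD` is what switches the generated configuration from SQLite to Postgres.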
Mail server specific values (will not send emails if not set):
* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
## Build
Build the docker image with the `docker build` command from the root of the synapse repository.
```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```
The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
You may have a local Python wheel cache available, in which case copy the relevant
packages into the ``cache/`` directory at the root of the project.


@@ -1,219 +0,0 @@
# vim:ft=yaml
## TLS ##
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []
## Server ##
server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0
## Ports ##
listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
{% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false
## Database ##
{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}
## Performance ##
event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"
## Ratelimiting ##
rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3
## Files ##
media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
# List of thumbnail sizes to precalculate when an image is uploaded.
thumbnail_sizes:
  - width: 32
    height: 32
    method: crop
  - width: 96
    height: 96
    method: crop
  - width: 320
    height: 240
    method: scale
  - width: 640
    height: 480
    method: scale
  - width: 800
    height: 600
    method: scale
url_preview_enabled: False
max_spider_size: "10M"
## Captcha ##
{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}
## Turn ##
{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}
## Registration ##
enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true
# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
- riot.im
## Metrics ##
{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}
## API Configuration ##
room_invite_state_types:
- "m.room.join_rules"
- "m.room.canonical_alias"
- "m.room.avatar"
- "m.room.name"
{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False
## Signing Keys ##
signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
password_config:
  enabled: true
{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}


@@ -1,29 +0,0 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]


@@ -1,20 +0,0 @@
#!/bin/bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Initialise & start the database
su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=/tmp -e py27-postgres


@@ -1,67 +0,0 @@
#!/usr/local/bin/python
import jinja2
import os
import sys
import subprocess
import glob
import codecs
# Utility functions
def convert(src, dst, environ):
    """Render the Jinja2 template at `src` into `dst` using `environ`."""
    with open(src) as infile, open(dst, "w") as outfile:
        outfile.write(jinja2.Template(infile.read()).render(**environ))
def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)
def generate_secrets(environ, secrets):
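    # `secrets` maps a short name (used in the generated key filename) to the
    # environment variable expected to hold the secret. If the variable is
    # unset, reuse a previously generated value from /data or create one.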
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle:
                    value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = codecs.encode(os.urandom(32), "hex").decode()
                with open(filename, "w") as handle:
                    handle.write(value)
            environ[secret] = value
# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration (and any missing keys), then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"):
            os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]
    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)


@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/
Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value::

  recaptcha_public_key: YOUR_PUBLIC_KEY
  recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via::

  enable_registration_captcha: true
@@ -25,5 +25,7 @@ Configuring IP used for auth
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured using the x_forwarded directive in the
listeners section of the homeserver.yaml configuration file.


@@ -1,12 +0,0 @@
Admin APIs
==========
This directory includes documentation for the various synapse-specific admin
APIs available.
Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:
``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
Restarting may be required for the changes to register.


@@ -1,23 +0,0 @@
# List all media in a room
This API gets a list of known media in a room.
The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.
It returns a JSON body like the following:
```
{
  "local": [
    "mxc://localhost/xwvutsrqponmlkjihgfedcba",
    "mxc://localhost/abcdefghijklmnopqrstuvwx"
  ],
  "remote": [
    "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
    "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
  ]
}
```
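For example, a minimal Python sketch using the third-party `requests` library (the homeserver URL, room ID and token below are placeholders):

```python
import requests
from urllib.parse import quote

room_id = "!roomid:example.com"
resp = requests.get(
    "https://localhost:8448/_matrix/client/r0/admin/room/%s/media" % quote(room_id),
    params={"access_token": "<admin access token>"},
)
media = resp.json()
print(media["local"], media["remote"])
```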


@@ -1,63 +0,0 @@
Purge History API
=================
The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.
Depending on the amount of history being purged, a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.
The API is:
``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``
including an ``access_token`` of a server admin.
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)
Room state data (such as joins, leaves, topic) is always preserved.
To delete local message events as well, set ``delete_local_events`` in the body:
.. code:: json
{
"delete_local_events": true
}
The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.
The API starts the purge running, and returns immediately with a JSON body with
a purge id:
.. code:: json
{
"purge_id": "<opaque id>"
}
Purge status query
------------------
It is possible to poll for updates on recent purges with a second API;
``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
.. code:: json
{
"status": "active"
}
The status will be one of ``active``, ``complete``, or ``failed``.
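For illustration, the purge can be started and then polled from Python using the
third-party ``requests`` library (the server name, room id, timestamp and token
below are placeholders):

.. code:: python

   import time
   from urllib.parse import quote

   import requests

   base = "https://server/_matrix/client/r0/admin"
   params = {"access_token": "<admin access token>"}

   resp = requests.post(
       base + "/purge_history/" + quote("!roomid:server"),
       params=params,
       json={"purge_up_to_ts": 1428782153000},
   )
   purge_id = resp.json()["purge_id"]

   # Poll until the purge leaves the "active" state.
   status = "active"
   while status == "active":
       time.sleep(5)
       status = requests.get(
           base + "/purge_history_status/" + purge_id, params=params
       ).json()["status"]
   print(status)  # "complete" or "failed"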


@@ -1,17 +0,0 @@
Purge Remote Media API
======================
The purge remote media API allows server admins to purge old cached remote
media.
The API is::
POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
{}
Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.


@@ -1,63 +0,0 @@
Shared-Secret Registration
==========================
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API::
> GET /_matrix/client/r0/admin/register
< {"nonce": "thisisanonce"}
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content.
As an example::
   > POST /_matrix/client/r0/admin/register
   > {
      "nonce": "thisisanonce",
      "username": "pepper_roni",
      "password": "pizza",
      "admin": true,
      "mac": "mac_digest_here"
     }

   < {
      "access_token": "token_here",
      "user_id": "@pepper_roni:localhost",
      "home_server": "test",
      "device_id": "device_id_here"
     }
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::
   import hmac, hashlib

   def generate_mac(nonce, user, password, admin=False):
       mac = hmac.new(
           key=shared_secret,
           digestmod=hashlib.sha1,
       )

       mac.update(nonce.encode('utf8'))
       mac.update(b"\x00")
       mac.update(user.encode('utf8'))
       mac.update(b"\x00")
       mac.update(password.encode('utf8'))
       mac.update(b"\x00")
       mac.update(b"admin" if admin else b"notadmin")

       return mac.hexdigest()
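Putting this together, a sketch of the whole flow using the third-party
``requests`` library (the homeserver URL and the credentials are placeholders)::

   import hashlib
   import hmac

   import requests

   shared_secret = b"<registration_shared_secret>"
   url = "http://localhost:8008/_matrix/client/r0/admin/register"

   nonce = requests.get(url).json()["nonce"]

   mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
   for part in (nonce, "pepper_roni", "pizza"):
       mac.update(part.encode("utf8"))
       mac.update(b"\x00")
   mac.update(b"admin")  # no trailing NUL after the last field

   resp = requests.post(url, json={
       "nonce": nonce,
       "username": "pepper_roni",
       "password": "pizza",
       "admin": True,
       "mac": mac.hexdigest(),
   })
   print(resp.json())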


@@ -1,86 +0,0 @@
Query Account
=============
This API returns information about a specific user account.
The API is::
GET /_matrix/client/r0/admin/whois/<user_id>
including an ``access_token`` of a server admin.
It returns a JSON body like the following:
.. code:: json
   {
       "user_id": "<user_id>",
       "devices": {
           "": {
               "sessions": [
                   {
                       "connections": [
                           {
                               "ip": "1.2.3.4",
                               "last_seen": 1417222374433,
                               "user_agent": "Mozilla/5.0 ..."
                           },
                           {
                               "ip": "1.2.3.10",
                               "last_seen": 1417222374500,
                               "user_agent": "Dalvik/2.1.0 ..."
                           }
                       ]
                   }
               ]
           }
       }
   }
``last_seen`` is measured in milliseconds since the Unix epoch.
Deactivate Account
==================
This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset). It can also mark the user as GDPR-erased (stopping their data
from being distributed further, and deleting it entirely if there are no other
references to it).
The API is::
POST /_matrix/client/r0/admin/deactivate/<user_id>
with a body of:
.. code:: json
{
"erase": true
}
including an ``access_token`` of a server admin.
The ``erase`` parameter is optional and defaults to ``false``.
An empty body may be passed for backwards compatibility.
Reset password
==============
Changes the password of another user.
The API is::
POST /_matrix/client/r0/admin/reset_password/<user_id>
with a body of:
.. code:: json
{
"new_password": "<secret>"
}
including an ``access_token`` of a server admin.
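For illustration, the password reset call from Python using the third-party
``requests`` library (the server name, user id and token are placeholders):

.. code:: python

   from urllib.parse import quote

   import requests

   resp = requests.post(
       "https://server/_matrix/client/r0/admin/reset_password/"
       + quote("@user:server"),
       params={"access_token": "<admin access token>"},
       json={"new_password": "<secret>"},
   )
   resp.raise_for_status()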


@@ -32,4 +32,5 @@ The format of the AS configuration file is as follows:
See the spec_ for further details on how application services work.
.. _spec: https://matrix.org/docs/spec/application_service/unstable.html


@@ -1,119 +1,49 @@
- Everything should comply with PEP8. Code should pass
  ``pep8 --max-line-length=100`` without any warnings.

- **Indenting**:

  - NEVER tabs. 4 spaces to indent.

  - follow PEP8; either hanging indent or multiline-visual indent depending
    on the size and shape of the arguments and what makes more sense to the
    author. In other words, both this::

        print("I am a fish %s" % "moo")

    and this::

        print("I am a fish %s" %
              "moo")

    and this::

        print(
            "I am a fish %s" %
            "moo",
        )

    ...are valid, although given each one takes up 2x more vertical space than
    the previous, it's up to the author's discretion as to which layout makes
    most sense for their function invocation. (e.g. if they want to add
    comments per-argument, or put expressions in the arguments, or group
    related arguments together, or want to deliberately extend or preserve
    vertical/horizontal space)

- **Line length**:

  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
  the overflowing content is not semantically significant and avoids an
  explosion of vertical whitespace).

  Use parentheses instead of ``\`` for line continuation wherever possible
  (which is pretty much everywhere).

- **Naming**:

  - Use camel case for class and type names
  - Use underscores for functions and variables.

- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.

- **Blank lines**:

  - There should be max a single new line between:

    - statements
    - functions in a class

  - There should be two new lines between:

    - definitions in a module (e.g., between different classes)

- **Whitespace**:

  There should be spaces where spaces should be and not where there shouldn't
  be:

  - a single space after a comma
  - a single space before and after for '=' when used as assignment
  - no spaces before and after for '=' for default values and keyword arguments.

- **Comments**: should follow the `google code style
  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
  This is so that we can generate documentation with `sphinx
  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
  `examples
  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
  in the sphinx documentation.

- **Imports**:

  - Prefer to import classes and functions rather than packages or modules.

    Example::

        from synapse.types import UserID
        ...
        user_id = UserID(local, server)

    is preferred over::

        from synapse import types
        ...
        user_id = types.UserID(local, server)

    (or any other variant).

    This goes against the advice in the Google style guide, but it means that
    errors in the name are caught early (at import time).

  - Multiple imports from the same package can be combined onto one line::

        from synapse.types import GroupID, RoomID, UserID

    An effort should be made to keep the individual imports in alphabetical
    order.

    If the list becomes long, wrap it with parentheses and split it over
    multiple lines.

  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
    imports should be grouped in the following order, with a blank line between
    each group:

    1. standard library imports
    2. related third party imports
    3. local application/library specific imports

  - Imports within each group should be sorted alphabetically by module name.

  - Avoid wildcard imports (``from synapse.types import *``) and relative
    imports (``from .types import UserID``).


@@ -1,160 +0,0 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================
Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.
There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.
Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.
Collecting policy agreement from a user
---------------------------------------
Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.
To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.
These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.
Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.
The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.
Once the templates are in place, make the following changes to `homeserver.yaml`:
1. Add a `user_consent` section, which should look like:
```yaml
user_consent:
  template_dir: privacy_policy_templates
  version: 1.0
```
`template_dir` points to the directory containing the policy
templates. `version` defines the version of the policy which will be served
to the user. In the example above, Synapse will serve
`privacy_policy_templates/en/1.0.html`.
2. Add a `form_secret` setting at the top level:
```yaml
form_secret: "<unique secret>"
```
This should be set to an arbitrary secret string (try `pwgen -y 30` to
generate suitable secrets).
More on what this is used for below.
3. Add `consent` wherever the `client` resource is currently enabled in the
`listeners` configuration. For example:
```yaml
listeners:
  - port: 8008
    resources:
      - names:
        - client
        - consent
```
Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On debian, try `apt-get install
python-jinja2`.
Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.
### Constructing the consent URI
It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:
* `u`: the user id of the user. This can either be a full MXID
(`@user:server.com`) or just the localpart (`user`).
* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
possible to calculate this on the commandline with something like:
```bash
echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
```
This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
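Equivalently, a short Python sketch using only the standard library (`<user>`
and `<form_secret>` are the same placeholders as above):

```python
import hashlib
import hmac

form_secret = b"<form_secret>"
user = b"<user>"  # full MXID or just the localpart, matching the `u` parameter
h = hmac.new(form_secret, user, hashlib.sha256).hexdigest()
print(h)  # value for the `h` query parameter
```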
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------
It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:
* ensure that the consent resource is configured, as in the previous section
* ensure that server notices are configured, as in [server_notices.md](server_notices.md).
* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
example:
```yaml
user_consent:
  server_notice_content:
    msgtype: m.text
    body: >-
      Please give your consent to the privacy policy at %(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent URI for that user.
* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the server notice.)
Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------
Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this).
To enable this, add `block_events_error` under `user_consent`. For example:
```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent URI for that user. As above, ensure that `public_baseurl` is set in
`homeserver.yaml` and gives the base URI that clients use to connect to the
server. (It is used to construct `consent_uri` in the error.)


@@ -1,442 +0,0 @@
Log contexts
============
.. contents::
To help track the processing of individual requests, synapse uses a
'log context' to track which request it is handling at any given moment. This
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
the information back out of the thread-local variable and add it to each log
record.
Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.
The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).
Deferreds make the whole thing complicated, so this document describes how it
all works, and how to write code which follows the rules.
Logcontexts without Deferreds
-----------------------------
In the absence of any Deferred voodoo, things are simple enough. As with any
code of this nature, the rule is that our function should leave things as it
found them:
.. code:: python
   from synapse.util import logcontext   # omitted from future snippets

   def handle_request(request_id):
       request_context = logcontext.LoggingContext()

       calling_context = logcontext.LoggingContext.current_context()
       logcontext.LoggingContext.set_current_context(request_context)
       try:
           request_context.request = request_id
           do_request_handling()
           logger.debug("finished")
       finally:
           logcontext.LoggingContext.set_current_context(calling_context)

   def do_request_handling():
       logger.debug("phew")  # this will be logged against request_id
LoggingContext implements the context management methods, so the above can be
written much more succinctly as:
.. code:: python
   def handle_request(request_id):
       with logcontext.LoggingContext() as request_context:
           request_context.request = request_id
           do_request_handling()
           logger.debug("finished")

   def do_request_handling():
       logger.debug("phew")
Using logcontexts with Deferreds
--------------------------------
Deferreds — and in particular, ``defer.inlineCallbacks`` — break
the linear flow of code so that there is no longer a single entry point where
we should set the logcontext and a single exit point where we should remove it.
Consider the example above, where ``do_request_handling`` needs to do some
blocking operation, and returns a deferred:
.. code:: python
   @defer.inlineCallbacks
   def handle_request(request_id):
       with logcontext.LoggingContext() as request_context:
           request_context.request = request_id
           yield do_request_handling()
           logger.debug("finished")
In the above flow:
* The logcontext is set
* ``do_request_handling`` is called, and returns a deferred
* ``handle_request`` yields the deferred
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred
So we have stopped processing the request (and will probably go on to start
processing the next), without clearing the logcontext.
To circumvent this problem, synapse code assumes that, wherever you have a
deferred, you will want to yield on it. To that end, wherever functions return
a deferred, we adopt the following conventions:
**Rules for functions returning deferreds:**
* If the deferred is already complete, the function returns with the same
logcontext it started with.
* If the deferred is incomplete, the function clears the logcontext before
returning; when the deferred completes, it restores the logcontext before
running any callbacks.
That sounds complicated, but actually it means a lot of code (including the
example above) "just works". There are two cases:
* If ``do_request_handling`` returns a completed deferred, then the logcontext
will still be in place. In this case, execution will continue immediately
after the ``yield``; the "finished" line will be logged against the right
context, and the ``with`` block restores the original context before we
return to the caller.
* If the returned deferred is incomplete, ``do_request_handling`` clears the
logcontext before returning. The logcontext is therefore clear when
``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
wrapper adds a callback to the deferred, and returns another (incomplete)
deferred to the caller, and it is safe to begin processing the next request.
Once ``do_request_handling``'s deferred completes, it will reinstate the
logcontext, before running the callback added by the ``inlineCallbacks``
wrapper. That callback runs the second half of ``handle_request``, so again
the "finished" line will be logged against the right
context, and the ``with`` block restores the original context.
As an aside, it's worth noting that ``handle_request`` follows our rules -
though that only matters if the caller has its own logcontext which it cares
about.
The following sections describe pitfalls and helpful patterns when implementing
these rules.
Always yield your deferreds
---------------------------
Whenever you get a deferred back from a function, you should ``yield`` on it
as soon as possible. (Returning it directly to your caller is ok too, if you're
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
call any other functions.
.. code:: python
   @defer.inlineCallbacks
   def fun():
       logger.debug("starting")
       yield do_some_stuff()   # just like this

       d = more_stuff()
       result = yield d        # also fine, of course

       defer.returnValue(result)

   def nonInlineCallbacksFun():
       logger.debug("just a wrapper really")
       return do_some_stuff()  # this is ok too - the caller will yield on
                               # it anyway.
Provided this pattern is followed all the way back up to the callchain to where
the logcontext was set, this will make things work out ok: provided
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.
It's all too easy to forget to ``yield``: for instance if we forgot that
``do_some_stuff`` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not before
a load of stuff has been logged against the wrong context. (Normally, other
things will break, more obviously, if you forget to ``yield``, so this tends
not to be a major problem in practice.)
Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.
Where you create a new Deferred, make it follow the rules
---------------------------------------------------------
Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:
.. code:: python
   # not a logcontext-rules-compliant function
   def get_sleep_deferred(seconds):
       d = defer.Deferred()
       reactor.callLater(seconds, d.callback, None)
       return d
That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and yielding on it:
.. code:: python
   @defer.inlineCallbacks
   def sleep(seconds):
       with PreserveLoggingContext():
           yield get_sleep_deferred(seconds)
This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.
You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:
.. code:: python
   def sleep(seconds):
       return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
Fire-and-forget
---------------
Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:
.. code:: python
   @defer.inlineCallbacks
   def do_request_handling():
       yield foreground_operation()

       # *don't* do this
       background_operation()

       logger.debug("Request handling complete")

   @defer.inlineCallbacks
   def background_operation():
       yield first_background_step()
       logger.debug("Completed first step")
       yield second_background_step()
       logger.debug("Completed second step")
The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.
The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.
There are two potential solutions to this.
One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:
.. code:: python
   @defer.inlineCallbacks
   def do_request_handling():
       yield foreground_operation()

       # start background_operation off in the empty logcontext, to
       # avoid leaking the current context into the reactor.
       with PreserveLoggingContext():
           background_operation()

       # this will now be logged against the request context
       logger.debug("Request handling complete")
Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).
The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
It can be used like this:
.. code:: python
   @defer.inlineCallbacks
   def do_request_handling():
       yield foreground_operation()

       logcontext.run_in_background(background_operation)

       # this will now be logged against the request context
       logger.debug("Request handling complete")
Passing synapse deferreds into third-party functions
----------------------------------------------------
A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:
.. code:: python
   d1 = operation1()
   d2 = operation2()
   d3 = defer.gatherResults([d1, d2])
This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Anyway, either
technique given in the `Fire-and-forget`_ section will work.
Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.
So, option one: reset the logcontext before starting the operations to be
gathered:
.. code:: python
   @defer.inlineCallbacks
   def do_request_handling():
       with PreserveLoggingContext():
           d1 = operation1()
           d2 = operation2()

       result = yield defer.gatherResults([d1, d2])
In this case particularly, though, option two, of using
``logcontext.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:
.. code:: python
   @defer.inlineCallbacks
   def do_request_handling():
       d1 = logcontext.preserve_fn(operation1)()
       d2 = logcontext.preserve_fn(operation2)()

       with PreserveLoggingContext():
           result = yield defer.gatherResults([d1, d2])
Was all this really necessary?
------------------------------
The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.
We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.
I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.
My alternative rules were:
* functions always preserve the logcontext of their caller, whether or not they
are returning a Deferred.
* Deferreds returned by synapse functions run their callbacks in the same
context as the function was originally called in.
The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.
So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
off the background process, and then leave the ``with`` block to wait for it:
.. code:: python
   def handle_request(request_id):
       with logcontext.LoggingContext() as request_context:
           request_context.request = request_id
           d = do_request_handling()

       def cb(r):
           logger.debug("finished")

       d.addCallback(cb)
       return d
(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).
Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:
.. code:: python
   def handle_request():
       r = do_some_stuff()
       r.addCallback(do_some_more_stuff)
       return r
— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.
The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``logcontext._PreservingContextDeferred``:
.. code:: python
   def do_some_stuff():
       deferred = do_some_io()
       pcd = _PreservingContextDeferred(LoggingContext.current_context())
       deferred.chainDeferred(pcd)
       return pcd
It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:
.. code:: python
   @defer.inlineCallbacks
   def handle_request():
       yield do_some_stuff()
       yield do_some_more_stuff()
To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again — changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.


@@ -1,43 +0,0 @@
Using the synapse manhole
=========================
The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:
```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```
(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).
Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:
```bash
ssh -p9000 matrix@localhost
```
The password is `rabbithole`.
This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.
As a simple example, retrieving an event from the database:
```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```


@@ -1,180 +1,50 @@
How to monitor Synapse metrics using Prometheus
===============================================
1. Install Prometheus:

   Follow instructions at http://prometheus.io/docs/introduction/install/

2. Enable Synapse metrics:

   There are two methods of enabling metrics in Synapse.

   The first serves the metrics as a part of the usual web server and can be
   enabled by adding the "metrics" resource to the existing listener as such::

     resources:
       - names:
         - client
         - metrics

   This provides a simple way of adding metrics to your Synapse installation,
   and serves under ``/_synapse/metrics``. If you do not wish your metrics to
   be publicly exposed, you will need to either filter it out at your load
   balancer, or use the second method.

   The second method runs the metrics server on a different port, in a
   different thread to Synapse. This can make it more resilient to heavy load
   (where metrics might otherwise become unretrievable), and makes it easier
   to expose to just internal networks. The served metrics are available over
   HTTP only, and will be available at ``/``.

   Add a new listener to homeserver.yaml::

     listeners:
       - type: metrics
         port: 9000
         bind_addresses:
           - '0.0.0.0'

   For both options, you will need to ensure that ``enable_metrics`` is set to
   ``True``.

   Restart Synapse.

3. Add a Prometheus target for Synapse.

   It needs to set the ``metrics_path`` to a non-default value (under
   ``scrape_configs``)::

     - job_name: "synapse"
       metrics_path: "/_synapse/metrics"
       static_configs:
         - targets: ["my.server.here:9092"]

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart Prometheus.
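As a quick sanity check that the endpoint is being served, a short Python
sketch (the host and port are placeholders, and ``requests`` is a third-party
library)::

   import requests

   text = requests.get("http://my.server.here:9092/_synapse/metrics").text
   print("\n".join(text.splitlines()[:10]))  # first few exposed metrics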
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
The duplicated metrics deprecated in Synapse 0.27.0 have been removed.
All time duration-based metrics have been changed to be seconds. This affects:
+----------------------------------+
| msec -> sec metrics |
+==================================+
| python_gc_time |
+----------------------------------+
| python_twisted_reactor_tick_time |
+----------------------------------+
| synapse_storage_query_time |
+----------------------------------+
| synapse_storage_schedule_time |
+----------------------------------+
| synapse_storage_transaction_time |
+----------------------------------+
Several metrics have been changed to be histograms, which sort entries into
buckets and allow better analysis. The following metrics are now histograms:
+-------------------------------------------+
| Altered metrics |
+===========================================+
| python_gc_time |
+-------------------------------------------+
| python_twisted_reactor_pending_calls |
+-------------------------------------------+
| python_twisted_reactor_tick_time |
+-------------------------------------------+
| synapse_http_server_response_time_seconds |
+-------------------------------------------+
| synapse_storage_query_time |
+-------------------------------------------+
| synapse_storage_schedule_time |
+-------------------------------------------+
| synapse_storage_transaction_time |
+-------------------------------------------+
Block and response metrics renamed for 0.27.0
---------------------------------------------
Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
metrics reported for the resource tracking for code blocks and HTTP requests.
At the same time, the corresponding ``*:total`` metrics are being renamed, as
the ``:total`` suffix no longer makes sense in the absence of a corresponding
``:count`` metric.
To enable a graceful migration path, this release just adds new names for the
metrics being renamed. A future release will remove the old ones.
The following table shows the new metrics, and the old metrics which they are
replacing.
==================================================== ===================================================
New name Old name
==================================================== ===================================================
synapse_util_metrics_block_count synapse_util_metrics_block_timer:count
synapse_util_metrics_block_count synapse_util_metrics_block_ru_utime:count
synapse_util_metrics_block_count synapse_util_metrics_block_ru_stime:count
synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_count:count
synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_duration:count
synapse_util_metrics_block_time_seconds synapse_util_metrics_block_timer:total
synapse_util_metrics_block_ru_utime_seconds synapse_util_metrics_block_ru_utime:total
synapse_util_metrics_block_ru_stime_seconds synapse_util_metrics_block_ru_stime:total
synapse_util_metrics_block_db_txn_count synapse_util_metrics_block_db_txn_count:total
synapse_util_metrics_block_db_txn_duration_seconds synapse_util_metrics_block_db_txn_duration:total
synapse_http_server_response_count synapse_http_server_requests
synapse_http_server_response_count synapse_http_server_response_time:count
synapse_http_server_response_count synapse_http_server_response_ru_utime:count
synapse_http_server_response_count synapse_http_server_response_ru_stime:count
synapse_http_server_response_count synapse_http_server_response_db_txn_count:count
synapse_http_server_response_count synapse_http_server_response_db_txn_duration:count
synapse_http_server_response_time_seconds synapse_http_server_response_time:total
synapse_http_server_response_ru_utime_seconds synapse_http_server_response_ru_utime:total
synapse_http_server_response_ru_stime_seconds synapse_http_server_response_ru_stime:total
synapse_http_server_response_db_txn_count synapse_http_server_response_db_txn_count:total
synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
==================================================== ===================================================
Standard Metric Names
---------------------
As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally, the units
have been changed to seconds from milliseconds.
================================== =============================
New name Old name
================================== =============================
process_cpu_user_seconds_total process_resource_utime / 1000
process_cpu_system_seconds_total process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================
The python-specific counts of garbage collector performance have been renamed.
=========================== ======================
New name Old name
=========================== ======================
python_gc_time reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts reactor_gc_counts
=========================== ======================
The twisted-specific reactor metrics have been renamed.
==================================== =====================
New name Old name
==================================== =====================
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time reactor_tick_time
==================================== =====================


@@ -1,99 +0,0 @@
Password auth provider modules
==============================
Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.
A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.
This document serves as a reference for those looking to implement their own
password auth providers.
Required methods
----------------
Password auth provider classes must provide the following methods:
*class* ``SomeProvider.parse_config``\(*config*)
This method is passed the ``config`` object for this module from the
homeserver configuration file.
It should perform any appropriate sanity checks on the provided
configuration, and return an object which is then passed into ``__init__``.
*class* ``SomeProvider``\(*config*, *account_handler*)
The constructor is passed the config object returned by ``parse_config``,
and a ``synapse.module_api.ModuleApi`` object which allows the
password provider to check if accounts exist and/or create new ones.
Optional methods
----------------
Password auth provider classes may optionally provide the following methods.
*class* ``SomeProvider.get_db_schema_files``\()
This method, if implemented, should return an Iterable of ``(name,
stream)`` pairs of database schema files. Each file is applied in turn at
initialisation, and a record is then made in the database so that it is
not re-applied on the next start.
``someprovider.get_supported_login_types``\()
This method, if implemented, should return a ``dict`` mapping from a login
type identifier (such as ``m.login.password``) to an iterable giving the
fields which must be provided by the user in the submission to the
``/login`` api. These fields are passed in the ``login_dict`` dictionary
to ``check_auth``.
For example, if a password auth provider wants to implement a custom login
type of ``com.example.custom_login``, where the client is expected to pass
the fields ``secret1`` and ``secret2``, the provider should implement this
method and return the following dict::
{"com.example.custom_login": ("secret1", "secret2")}
``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)
This method is the one that does the real work. If implemented, it will be
called for each login attempt where the login type matches one of the keys
returned by ``get_supported_login_types``.
It is passed the (possibly UNqualified) ``user`` provided by the client,
the login type, and a dictionary of login secrets passed by the client.
The method should return a Twisted ``Deferred`` object, which resolves to
the canonical ``@localpart:domain`` user id if authentication is successful,
and ``None`` if not.
Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
which case the second field is a callback which will be called with the
result from the ``/login`` call (including ``access_token``, ``device_id``,
etc.)
``someprovider.check_password``\(*user_id*, *password*)
This method provides a simpler interface than ``get_supported_login_types``
and ``check_auth`` for password auth providers that just want to provide a
mechanism for validating ``m.login.password`` logins.
If implemented, it will be called to check logins with an
``m.login.password`` login type. It is passed a qualified
``@localpart:domain`` user id, and the password provided by the user.
The method should return a Twisted ``Deferred`` object, which resolves to
``True`` if authentication is successful, and ``False`` if not.
``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)
This method, if implemented, is called when a user logs out. It is passed
the qualified user ID, the ID of the deactivated device (if any: access
tokens are occasionally created without an associated device ID), and the
(now deactivated) access token.
It may return a Twisted ``Deferred`` object; the logout request will wait
for the deferred to complete but the result is ignored.
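For example, a provider which mirrors sessions into an external store might
clean up as follows (``_session_store`` is hypothetical)::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def on_logged_out(self, user_id, device_id, access_token):
        # Best-effort cleanup; the result of the Deferred is ignored.
        yield self._session_store.delete_session(access_token)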

View File

@@ -1,27 +1,19 @@
Using Postgres
--------------
Postgres version 9.4 or later is known to work.
Set up database
===============
Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::
su - postgres
createuser --pwprompt synapse_user
The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::
 CREATE DATABASE synapse
  ENCODING 'UTF8'
  LC_COLLATE='C'
  LC_CTYPE='C'
  template=template0
  OWNER synapse_user;
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
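As a quick sanity check (assuming ``psycopg2`` is installed and the database
is reachable on localhost), the encoding can be verified from Python::

    import psycopg2

    conn = psycopg2.connect(
        dbname="synapse", user="synapse_user",
        password="...", host="localhost",
    )
    cur = conn.cursor()
    cur.execute("SHOW SERVER_ENCODING")
    print(cur.fetchone())  # should print ('UTF8',)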
@@ -52,8 +44,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector
Synapse config
==============
When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::
  database:
    name: psycopg2
@@ -102,12 +94,9 @@ complete, restart synapse. For instance::
cp homeserver.db homeserver.db.snapshot
./synctl start
Copy the old config file into a new config file::

    cp homeserver.yaml homeserver-postgres.yaml

Edit the database section as described in the section *Synapse config* above.
Then, with the SQLite snapshot located at ``homeserver.db.snapshot``, simply
run::
synapse_port_db --sqlite-database homeserver.db.snapshot \
--postgres-config homeserver-postgres.yaml
@@ -123,14 +112,9 @@ script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::
synapse_port_db --sqlite-database homeserver.db \
    --postgres-config homeserver-postgres.yaml
Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::
    ./synctl stop
    mv homeserver.yaml homeserver-old-sqlite.yaml
    mv homeserver-postgres.yaml homeserver.yaml
    ./synctl start

Synapse should now be running against PostgreSQL.

View File

@@ -1,23 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
{% if has_consented %}
<p>
Your base already belong to us.
</p>
{% else %}
<p>
All your base are belong to us.
</p>
<form method="post" action="consent">
<input type="hidden" name="v" value="{{version}}"/>
<input type="hidden" name="u" value="{{user}}"/>
<input type="hidden" name="h" value="{{userhmac}}"/>
<input type="submit" value="Sure thing!"/>
</form>
{% endif %}
</body>
</html>

View File

@@ -1,11 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
<p>
Sweet.
</p>
</body>
</html>

View File

@@ -26,10 +26,28 @@ expose the append-only log to the readers should be fairly minimal.
Architecture
------------
The Replication API
~~~~~~~~~~~~~~~~~~~
Synapse will optionally expose a long poll HTTP API for extracting updates. The
API will have a similar shape to /sync in that clients provide tokens
indicating where in the log they have reached and a timeout. The synapse server
then either responds with updates immediately if it already has updates or it
waits until the timeout for more updates. If the timeout expires and nothing
happened then the server returns an empty response.
However, unlike the /sync API, this replication API returns synapse-specific
data rather than trying to implement a matrix specification. The replication
results are returned as arrays of rows where the rows are mostly lifted
directly from the database. This avoids unnecessary JSON parsing on the server
and hopefully avoids an impedance mismatch between the data returned and the
required updates to the datastore.
This does not replicate all the database tables as many of the database tables
are indexes that can be recovered from the contents of other tables.
The format and parameters for the api are documented in
``synapse/replication/resource.py``.
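A client-side sketch of the long poll loop is shown below; the path and
parameter names here are assumptions for illustration, and the authoritative
list is in ``synapse/replication/resource.py``::

    import requests

    def poll_replication(base_url, token, timeout_ms=30000):
        # Long poll for updates to the "events" stream from `token`;
        # returns immediately if updates are already available.
        resp = requests.get(
            base_url + "/_synapse/replication",
            params={"events": token, "timeout": timeout_ms},
        )
        resp.raise_for_status()
        return resp.json()  # rows lifted more or less directly from the DB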
The Slaved DataStore

View File

@@ -1,74 +0,0 @@
Server Notices
==============
'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
They are used as part of communicating the server's policies (see
[consent_tracking.md](consent_tracking.md)); however, the intention is that
they may also find a use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.
User experience
---------------
When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.
Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).
The user is prevented from sending any messages in this room by the power
levels.
Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.
Synapse configuration
---------------------
Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.
In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:
```yaml
server_notices:
  system_mxid_localpart: server
  system_mxid_display_name: "Server Notices"
  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
  room_name: "Server Notices"
```
The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.
`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.
Sending notices
---------------
As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).
In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:
```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```

View File

@@ -50,7 +50,7 @@ master_doc = 'index'
# General information about the project.
project = u'Synapse'
copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'
copyright = u'2014, TNG'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

View File

@@ -1,223 +0,0 @@
TCP Replication
===============
Motivation
----------
Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.
Overview
--------
The protocol is based on fire-and-forget, line-based commands. An example flow
would be (where '>' indicates master-to-worker and '<' worker-to-master flows)::
> SERVER example.com
< REPLICATE events 53
> RDATA events 54 ["$foo1:bar.com", ...]
> RDATA events 55 ["$foo4:bar.com", ...]
The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.
Error reporting happens by either the client or server sending an ``ERROR``
command, and usually the connection will be closed.
Since the protocol is a simple line-based one, it's possible to connect to the
server manually using a tool like netcat. A few things should be noted when
using the protocol manually:
* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
be used to get all future updates. The special stream name ``ALL`` can be used
with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
disabled on the main process.
* The server will only time out connections that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial ``PING`` on connection and ensure at least one command
  every 5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token; however, if the stream
  has multiple rows to replicate per token, the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.
Architecture
------------
The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the ``RDATA`` command is defined as::
RDATA <stream_name> <token> <row_json>
(Note that `<row_json>` may contains spaces, but cannot contain newlines.)
Blank lines are ignored.
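A line of the protocol can therefore be parsed with a simple split; the
following is a sketch rather than the actual implementation::

    def parse_line(line):
        # The first word is the command name; the remainder (which may
        # contain spaces, e.g. the JSON row of RDATA) is the payload.
        if not line.strip():
            return None  # blank lines are ignored
        name, _, rest = line.partition(" ")
        return name, rest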
Keep alives
~~~~~~~~~~~
Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.
Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.
This ensures that both sides can quickly realize if the TCP connection has gone
away and handle the situation appropriately.
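A sketch of the send side of this keep-alive logic using Twisted follows;
``send_command`` is an assumed callable, and the receive timeout would be
tracked separately::

    import time

    from twisted.internet import task

    PING_INTERVAL = 5.0  # send at least one command every ~5s

    def start_keep_alive(send_command):
        # Periodically send a PING carrying the current time in millis.
        loop = task.LoopingCall(
            lambda: send_command("PING %d" % int(time.time() * 1000))
        )
        loop.start(PING_INTERVAL)
        return loop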
Start up
~~~~~~~~
When a new connection is made, the server:
* Sends a ``SERVER`` command, which includes the identity of the server, allowing
the client to detect if it is connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
promptly.
The client:
* Sends a ``NAME`` command, allowing the server to associate a human friendly
name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
  command with the stream name and the token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
expected server name.
Error handling
~~~~~~~~~~~~~~
If either side detects an error it can send an ``ERROR`` command and close the
connection.
If the client side loses the connection to the server it should reconnect,
following the steps above.
Congestion
~~~~~~~~~~
If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.
Reliability
~~~~~~~~~~~
In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.
The exception to this is the stream data itself, i.e. ``RDATA`` commands, since
these include tokens which can be used to restart the stream on connection
errors.
The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.
Example
~~~~~~~
An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending; these prefixes are *not* included on the wire::
* connection established *
> SERVER localhost:8823
> PING 1490197665618
< NAME synapse.app.appservice
< PING 1490197665618
< REPLICATE events 1
< REPLICATE backfill 1
< REPLICATE caches 1
> POSITION events 1
> POSITION backfill 1
> POSITION caches 1
> RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
> RDATA events 14 ["$149019767112vOHxz:localhost:8823",
"!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
< PING 1490197675618
> ERROR server stopping
* connection closed by server *
The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.
An example of a batched set of ``RDATA`` is::
> RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
> RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
> RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
> RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
In this case the client shouldn't advance its ``caches`` stream token until it
sees the last ``RDATA``.
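In client code, this batching rule might be handled along these lines
(``process_rows`` and the ``state`` object are illustrative)::

    def on_rdata(state, stream_name, token, row):
        # Buffer rows until a real (numeric) token arrives, then advance.
        state.pending.setdefault(stream_name, []).append(row)
        if token != "batch":
            rows = state.pending.pop(stream_name)
            process_rows(stream_name, int(token), rows)
            # Only now is it safe to record the token as a restart point.
            state.tokens[stream_name] = int(token)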
List of commands
~~~~~~~~~~~~~~~~
The list of valid commands, with the side that may send each: server (S) or client (C):
SERVER (S)
Sent at the start to identify which server the client is talking to
RDATA (S)
A single update in a stream
POSITION (S)
The position of the stream has been updated
ERROR (S, C)
There was an error
PING (S, C)
Sent periodically to ensure the connection is still alive
NAME (C)
Sent at the start by client to inform the server who they are
REPLICATE (C)
Asks the server to replicate a given stream
USER_SYNC (C)
A user has started or stopped syncing
FEDERATION_ACK (C)
Acknowledge receipt of some federation data
REMOVE_PUSHER (C)
Inform the server a pusher should be removed
INVALIDATE_CACHE (C)
Inform the server a cache should be invalidated
SYNC (S, C)
Used exclusively in tests
See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
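To experiment with the protocol end to end, a minimal blocking client can be
written with nothing but the standard library; the host, port and stream name
below are assumptions for a local test setup::

    import socket

    sock = socket.create_connection(("localhost", 9092))
    f = sock.makefile(mode="rw", newline="\n")

    def send(cmd):
        f.write(cmd + "\n")
        f.flush()

    send("NAME manual-test")      # optional human friendly name
    send("PING 0")                # enables the server-side timeouts
    send("REPLICATE events NOW")  # subscribe to future events updates

    for line in f:
        print(line.rstrip("\n"))
        if line.startswith("ERROR"):
            break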

Some files were not shown because too many files have changed in this diff