
Compare commits


1 commit

Author: Erik Johnston | SHA1: 6affde172c | Message: Replace some ujson with simplejson to make it work | Date: 2018-03-15 23:38:43 +00:00
521 changed files with 16815 additions and 38243 deletions


@@ -1,159 +0,0 @@
version: 2
jobs:
  dockerhubuploadrelease:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_TAG .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:$CIRCLE_TAG
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:$CIRCLE_SHA1 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker tag matrixdotorg/synapse:$CIRCLE_SHA1 matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:$CIRCLE_SHA1
      - run: docker push matrixdotorg/synapse:latest
  sytestpy2:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /^v[0-9].[0-9]+.[0-9]+(.[0-9]+)?/
            branches:
              ignore: /.*/
      - dockerhubuploadlatest:
          filters:
            branches:
              only: master


@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -e
# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
echo "Can't figure out what the PR number is!"
exit 1
fi
# Get the reference, using the GitHub API
GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
# Show what we are before
git show -s
# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE
# Show what we are after.
git show -s


@@ -1,7 +0,0 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.git/*
.tox/*


@@ -27,9 +27,8 @@ Describe here the problem that you are experiencing, or the feature you are requesting
Describe how what happens differs from what you expected.
<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly. -->
If you can identify any relevant log snippets from _homeserver.log_, please include
those here (please be careful to remove any personal or private data):
### Version information

.gitignore (9 changes)

@@ -1,11 +1,8 @@
*.pyc
.*.swp
*~
*.lock
.DS_Store
_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
@@ -16,7 +13,6 @@ docs/build/
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
homeserver*.yaml
@@ -36,7 +32,6 @@ demo/media_store.*
demo/etc
uploads
cache
.idea/
media_store/
@@ -44,9 +39,6 @@ media_store/
*.tac
build/
venv/
venv*/
*venv/
localhost-800*/
static/client/register/register_config.js
@@ -56,4 +48,3 @@ env/
*.config
.vscode/
.ropeproject/


@@ -1,49 +1,14 @@
sudo: false
language: python
python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

before_script:
  - git remote set-branches --add origin develop
  - git fetch origin develop

matrix:
  fast_finish: true
  include:
    - python: 2.7
      env: TOX_ENV=packaging
    - python: 2.7
      env: TOX_ENV=pep8
    - python: 2.7
      env: TOX_ENV=py27
    - python: 2.7
      env: TOX_ENV=py27-old
    - python: 2.7
      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
      services:
        - postgresql
    - python: 3.5
      env: TOX_ENV=py35
    - python: 3.6
      env: TOX_ENV=py36
    - python: 3.6
      env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
      services:
        - postgresql
    - python: 3.6
      env: TOX_ENV=check_isort
    - python: 3.6
      env: TOX_ENV=check-newsfragment

env:
  - TOX_ENV=packaging
  - TOX_ENV=pep8
  - TOX_ENV=py27

install:
  - pip install tox


@@ -60,9 +60,3 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix

CHANGES.md (2782 changes): file diff suppressed because it is too large.

CHANGES.rst (2328 changes, new file): file diff suppressed because it is too large.


@@ -30,28 +30,8 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
keep an eye on the pull request for feedback.
To run unit tests in a local development environment, you can use:
- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
SQLite-backed Synapse on Python 2.7.
- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.
We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested by Jenkins: if your change
breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please
lurk there and keep an eye open.
Code style
~~~~~~~~~~
@@ -64,27 +44,6 @@ Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
Attribution
~~~~~~~~~~~
@@ -93,8 +52,7 @@ AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and
we'll try to fix it :)
from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
Sign off
~~~~~~~~
@@ -143,27 +101,18 @@ the contribution or otherwise have the right to contribute it to Matrix::
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.
Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
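A minimal sketch of that workflow (the commit message is a placeholder)::

    git config user.name "Your Name"
    git config user.email your@email.example.org
    git commit -s -m "Describe your change here"

The ``-s`` flag appends a ``Signed-off-by: Your Name <your@email.example.org>``
line using those two config values, as shown above.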
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!


@@ -2,7 +2,6 @@ include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
@@ -26,15 +25,7 @@ recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
recursive-exclude jenkins *.sh
include pyproject.toml
recursive-include changelog.d *
prune .github
prune demo/etc
prune docker
prune .circleci


@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
Thanks for using Matrix!
@@ -157,15 +157,12 @@ if you prefer.
In case of problems, please see the _`Troubleshooting` section below.
There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
this including configuration options is available in the README on
hub.docker.com.
Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.
Configuring synapse
-------------------
@@ -285,7 +282,7 @@ Connecting to Synapse from a client
The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
https://riot.im/app. You will need to specify a "Custom server" when you log on
http://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://domain.tld`` if you set up a reverse proxy
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
@@ -331,7 +328,7 @@ Security Note
=============
Matrix serves raw user generated data in some APIs - specifically the `content
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
@@ -350,33 +347,16 @@ Platform-Specific Instructions
Debian
------
Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
Fedora
------
Synapse is in the Fedora repositories as ``matrix-synapse``::
sudo dnf install matrix-synapse
Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse
OpenSUSE
--------
Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
sudo zypper install matrix-synapse
SUSE Linux Enterprise Server
----------------------------
Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
ArchLinux
---------
@@ -459,13 +439,37 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-
Windows Install
---------------
Synapse can be installed on Cygwin. It requires the following Cygwin packages:
- gcc
- git
- libffi-devel
- openssl (and openssl-devel, python-openssl)
- python
- python-setuptools
The content repository requires additional packages and will be unable to process
uploads without them:
- libjpeg8
- libjpeg8-devel
- zlib
If you choose to install Synapse without these packages, you will need to reinstall
``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` followed by
``pip install pillow --user``.
Troubleshooting:
- You may need to upgrade ``setuptools`` to get this to work correctly:
``pip install setuptools --upgrade``.
- You may encounter errors indicating that ``ffi.h`` is missing, even with
``libffi-devel`` installed. If you do, copy the ``.h`` files:
``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include``
- You may need to install libsodium from source in order to install PyNacl. If
you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.
Troubleshooting
===============
@@ -515,7 +519,7 @@ Troubleshooting Running
-----------------------
If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and
@@ -606,9 +610,6 @@ should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
$ dig -t srv _matrix._tcp.example.com
_matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
Note that the server hostname cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.
You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
its user-ids, by setting ``server_name``::
@@ -663,8 +664,8 @@ useful just for development purposes. See `<demo/README>`_.
Using PostgreSQL
================
As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.
The advantages of Postgres include:
@@ -688,7 +689,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
@@ -718,18 +719,6 @@ so an example nginx configuration might look like::
}
}
and an example apache configuration may look like::
<VirtualHost *:443>
SSLEngine on
ServerName matrix.example.com;
<Location /_matrix>
ProxyPass http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse http://127.0.0.1:8008/_matrix
</Location>
</VirtualHost>
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
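A minimal sketch of those settings in ``homeserver.yaml`` (only the options
named above are shown; the rest of the listener configuration is unchanged)::

    listeners:
      - port: 8008
        type: http
        tls: false
        x_forwarded: true
        bind_addresses: ['127.0.0.1']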
@@ -884,7 +873,7 @@ to install using pip and a virtualenv::
virtualenv -p python2.7 env
source env/bin/activate
python -m synapse.python_dependencies | xargs pip install
python synapse/python_dependencies.py | xargs pip install
pip install lxml mock
This will run a process of downloading and installing all the needed
@@ -901,17 +890,6 @@ This should end with a 'PASSED' result::
PASSED (successes=143)
Running the Integration Tests
=============================
Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.
Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.
Building Internal API Documentation
===================================
@@ -939,13 +917,5 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
degrade.
Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall memory use, and especially in terms of giving back RAM
to the OS. To use it, the library must simply be put in the LD_PRELOAD
environment variable when launching Synapse. On Debian, this can be done
by installing the ``libjemalloc1`` package and adding this line to
``/etc/default/matrix-synapse``::
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys


@@ -48,18 +48,6 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to $NEXT_VERSION
====================
This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and CPU use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.
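As a sketch, assisting amounts to installing the module into the environment
Synapse runs in and enabling the option in the configuration file::

    pip install psutil

and in ``homeserver.yaml``::

    report_stats: true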
Upgrading to v0.15.0
====================


@@ -1 +0,0 @@
!.gitignore


@@ -1 +0,0 @@
Fix problem when playing media from Chrome using direct URL (thanks @remjey!)


@@ -1,2 +0,0 @@
Unit tests can now be run under PostgreSQL in Docker using
``test_postgresql.sh``.


@@ -1 +0,0 @@
Speed up calculation of typing updates for replication


@@ -1 +0,0 @@
support registering regular users non-interactively with register_new_matrix_user script


@@ -1 +0,0 @@
Fix broken invite email links for self hosted riots


@@ -1,2 +0,0 @@
Remove documentation regarding installation on Cygwin, the use of WSL is
recommended instead.


@@ -1 +0,0 @@
Don't ratelimit autojoins


@@ -1 +0,0 @@
Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables.


@@ -1 +0,0 @@
Fix 500 error when deleting unknown room alias


@@ -1 +0,0 @@
Fix some b'abcd' noise in logs and metrics


@@ -1 +0,0 @@
Report "python_version" in the phone home stats


@@ -1 +0,0 @@
Fix some b'abcd' noise in logs and metrics


@@ -1 +0,0 @@
Fix typo in README, synaspse -> synapse


@@ -1 +0,0 @@
When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers.


@@ -1 +0,0 @@
Increase the timeout when filling missing events in federation requests


@@ -1 +0,0 @@
Improve the logging when handling a federation transaction


@@ -1 +0,0 @@
Improve logging of outbound federation requests


@@ -1 +0,0 @@
Fix incorrect server-name indication for outgoing federation requests


@@ -1 +0,0 @@
Fix adding client IPs to the database failing on Python 3.


@@ -1 +0,0 @@
Improve logging of outbound federation requests


@@ -1 +0,0 @@
Fix bug where things occasionally were not being timed out correctly.


@@ -1 +0,0 @@
Fix the docker image building on python 3


@@ -1 +0,0 @@
Add a regression test for logging failed HTTP requests on Python 3.


@@ -1 +0,0 @@
Fix bug where outbound federation would stop talking to some servers when using workers


@@ -1 +0,0 @@
Always LL ourselves if we're in a room


@@ -1 +0,0 @@
Comments and interface cleanup for on_receive_pdu


@@ -1 +0,0 @@
Fix spurious exceptions when remote http client closes connection


@@ -1 +0,0 @@
Log exceptions thrown by background tasks


@@ -1 +0,0 @@
Fix some instances of ExpiringCache not expiring cache items


@@ -1 +0,0 @@
Fix out-of-bounds error when LLing yourself


@@ -1 +0,0 @@
Automate pushes to docker hub


@@ -1 +0,0 @@
Require attrs 16.0.0 or later


@@ -1 +0,0 @@
Fix incompatibility with python3 on alpine


@@ -1 +0,0 @@
Run the test suite on the oldest supported versions of our dependencies in CI.


@@ -1 +0,0 @@
Fix exceptions from metrics handler


@@ -1 +0,0 @@
CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches.


@@ -1 +0,0 @@
Fix docstrings and add tests for state store methods


@@ -1 +0,0 @@
Include eventid in log lines when processing incoming federation transactions


@@ -1 +0,0 @@
Fix errors due to concurrent monthly_active_user upserts


@@ -1 +0,0 @@
fix docstring for FederationClient.get_state_for_room


@@ -1 +0,0 @@
Run notify_app_services as a bg process


@@ -1 +0,0 @@
Improve the logging when handling a federation transaction


@@ -1 +0,0 @@
Clarifications in FederationHandler


@@ -1 +0,0 @@
Replaced all occurrences of e.message with str(e). Contributed by Schnuffle


@@ -1,10 +0,0 @@
Community Contributions
=======================
Everything in this directory is a project submitted by the community that may
be useful to others. As such, the project maintainers cannot guarantee support,
stability, or backwards compatibility of these projects.
Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.


@@ -1,41 +0,0 @@
# Synapse Docker
### Automated configuration
It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.
Read the section about environment variables and set at least mandatory variables,
then run the server:
```
docker-compose up -d
```
If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.
### Manual configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example,
please comment/uncomment sections that are not suitable for your use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```
Then, customize your configuration and run the server:
```
docker-compose up -d
```
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)


@@ -1,50 +0,0 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.

version: '3'

services:
  synapse:
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data


@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules
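A minimal sketch of the scrape configuration for step 1 (the job name, metrics path, and port are assumptions based on a typical Synapse metrics listener; see the metrics-howto linked above for the values that match your setup):

```
# prometheus.yml (fragment); the target host/port are illustrative
scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["localhost:9092"]
```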

File diff suppressed because it is too large.


@@ -22,8 +22,6 @@ import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from six import string_types
def make_graph(file_name, room_id, file_prefix, limit):
print "Reading lines"
@@ -60,7 +58,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, string_types):
            elif isinstance(value, basestring):
                pass
            else:
                value = json.dumps(value)


@@ -202,11 +202,11 @@ new PromConsole.Graph({
<h1>Requests</h1>
<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<div id="synapse_http_server_requests_servlet"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_request_count:servlet[2m])",
node: document.querySelector("#synapse_http_server_requests_servlet"),
expr: "rate(synapse_http_server_requests:servlet[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -215,11 +215,11 @@ new PromConsole.Graph({
})
</script>
<h4>&nbsp;(without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<div id="synapse_http_server_requests_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -233,7 +233,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -276,7 +276,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -291,7 +291,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -306,7 +306,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,


@@ -1,10 +1,10 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])


@@ -5,19 +5,19 @@ groups:
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
- record: 'synapse_http_server_requests:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
expr: "sum(synapse_http_server_requests) by (method)"
- record: 'synapse_http_server_requests:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
expr: 'sum(synapse_http_server_requests) by (servlet)'
- record: 'synapse_http_server_request_count:total'
- record: 'synapse_http_server_requests:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
expr: 'sum(synapse_http_server_requests:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'


@@ -2,9 +2,6 @@
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
[Unit]
Description=Synapse Matrix homeserver
@@ -15,7 +12,6 @@ Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
[Install]
WantedBy=multi-user.target
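A sketch of installing the unit (the ``synapse.service`` file name is a
placeholder for whatever name this file is saved under)::

    cp synapse.service /etc/systemd/system/
    systemctl daemon-reload
    systemctl enable synapse
    systemctl start synapse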


@@ -1,42 +0,0 @@
ARG PYTHON_VERSION=2
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
COPY . /synapse
RUN apk add --no-cache --virtual .build_deps \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        zlib-dev \
    && cd /synapse \
    && apk add --no-cache --virtual .runtime_deps \
        libffi \
        libjpeg-turbo \
        libressl \
        libxslt \
        libpq \
        zlib \
        su-exec \
    && pip install --upgrade \
        lxml \
        pip \
        psycopg2 \
        setuptools \
    && mkdir -p /synapse/cache \
    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
    && mv /synapse/docker/start.py /synapse/docker/conf / \
    && rm -rf \
        setup.cfg \
        setup.py \
        synapse \
    && apk del .build_deps
VOLUME ["/data"]
EXPOSE 8008/tcp 8448/tcp
ENTRYPOINT ["/start.py"]


@@ -1,12 +0,0 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:latest
# The Sytest image doesn't come with python, so install that
RUN apt-get -qq install -y python python-dev python-pip
# We need tox to run the tests in run_pg_tests.sh
RUN pip install tox
ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh


@@ -1,125 +0,0 @@
# Synapse Docker
This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server, you should run these separately.
## Run
We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.
### Using docker-compose (easier)
This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.
An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker)
section of the synapse project for examples.
### Without Compose (harder)
If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.
```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```
## Volumes
The image expects a single volume, located at ``/data``, that will hold:
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on large but
low-performance HDD storage while other files could be stored on high-performance
endpoints.
In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service YAML
configuration file there. Multiple application services are supported.
## Environment
Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.
Global settings:
* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.
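As a sketch, such a file can be generated with the image's `generate` mode (see
the start script; the mount path is illustrative, and `generate` mode also
requires the two mandatory variables described below):

```
docker run --rm \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest generate
```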
Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:
* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
URIs to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].
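As a sketch, extending the earlier `docker run` example with a few of these
variables (the values are illustrative):

```
docker run -d --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e SYNAPSE_ENABLE_REGISTRATION=yes \
    -e SYNAPSE_MAX_UPLOAD_SIZE=50M \
    docker.io/matrixdotorg/synapse:latest
```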
Shared secrets, which will be initialized to random values if not set:
* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
to the server.
Database specific values (will use SQLite if not set):
* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
Mail server specific values (will not send emails if not set):
* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
## Build
Build the docker image with the `docker build` command from the root of the synapse repository.
```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```
The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
You may have a local Python wheel cache available, in which case copy the relevant
packages in the ``cache/`` directory at the root of the project.


@@ -1,219 +0,0 @@
# vim:ft=yaml
## TLS ##
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []
## Server ##
server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0
## Ports ##
listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
{% endif %}
  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false
## Database ##
{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}
## Performance ##
event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"
## Ratelimiting ##
rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3
## Files ##
media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
# List of thumbnail to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale
url_preview_enabled: False
max_spider_size: "10M"
## Captcha ##
{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}
## Turn ##
{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}
## Registration ##
enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true
# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
- riot.im
## Metrics ###
{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}
## API Configuration ##
room_invite_state_types:
- "m.room.join_rules"
- "m.room.canonical_alias"
- "m.room.avatar"
- "m.room.name"
{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False
## Signing Keys ##
signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true
{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}


@@ -1,29 +0,0 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]


@@ -1,20 +0,0 @@
#!/bin/bash
# This script runs the PostgreSQL tests inside a Docker container. It expects
# the relevant source files to be mounted into /src (done automatically by the
# caller script). It will set up the database, run it, and then use the tox
# configuration to run the tests.
set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres
# Initialise & start the database
su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=/tmp -e py27-postgres


@@ -1,67 +0,0 @@
#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob
import codecs

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = codecs.encode(os.urandom(32), "hex").decode()
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration, missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]

    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)


@@ -8,56 +8,20 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.
The API is:
The API is simply:
``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``
``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
including an ``access_token`` of a server admin.
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)
deleted, and room state data before the cutoff is always removed).
Room state data (such as joins, leaves, topic) is always preserved.
To delete local message events as well, set ``delete_local_events`` in the body:
To delete local events as well, set ``delete_local_events`` in the body:
.. code:: json

   {
       "delete_local_events": true
   }
The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.
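For example, a request body using the timestamp form might look like this (the
value is an illustrative epoch-milliseconds timestamp):

.. code:: json

   {
       "purge_up_to_ts": 1514764800000
   }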
The API starts the purge running, and returns immediately with a JSON body with
a purge id:
.. code:: json

   {
       "purge_id": "<opaque id>"
   }
Purge status query
------------------
It is possible to poll for updates on recent purges with a second API:
``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
.. code:: json

   {
       "status": "active"
   }
The status will be one of ``active``, ``complete``, or ``failed``.


@@ -1,63 +0,0 @@
Shared-Secret Registration
==========================
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
To fetch the nonce, you need to request one from the API::
  > GET /_matrix/client/r0/admin/register
  < {"nonce": "thisisanonce"}
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content.
As an example::
  > POST /_matrix/client/r0/admin/register
  > {
      "nonce": "thisisanonce",
      "username": "pepper_roni",
      "password": "pizza",
      "admin": true,
      "mac": "mac_digest_here"
    }
  < {
      "access_token": "token_here",
      "user_id": "@pepper_roni:localhost",
      "home_server": "test",
      "device_id": "device_id_here"
    }
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::
import hmac, hashlib

def generate_mac(nonce, user, password, admin=False):
    # shared_secret is the registration_shared_secret from the
    # homeserver configuration, as a bytestring
    mac = hmac.new(
        key=shared_secret,
        digestmod=hashlib.sha1,
    )
    mac.update(nonce.encode('utf8'))
    mac.update(b"\x00")
    mac.update(user.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()
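For illustration only, the nonce can then be fetched and the registration
submitted with the ``requests`` library; the server URL and credentials are
placeholders::

    import requests

    base = "https://server/_matrix/client/r0/admin/register"
    nonce = requests.get(base).json()["nonce"]

    body = {
        "nonce": nonce,
        "username": "pepper_roni",
        "password": "pizza",
        "admin": True,
        "mac": generate_mac(nonce, "pepper_roni", "pizza", admin=True),
    }
    print(requests.post(base, json=body).json())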

View File

@@ -44,26 +44,13 @@ Deactivate Account
This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset). It can also mark the user as GDPR-erased (stopping their data
from being distributed further, and deleting it entirely if there are no other
references to it).
password reset).
The api is::
POST /_matrix/client/r0/admin/deactivate/<user_id>
with a body of:
.. code:: json
{
"erase": true
}
including an ``access_token`` of a server admin.
The erase parameter is optional and defaults to 'false'.
An empty body may be passed for backwards compatibility.
including an ``access_token`` of a server admin, and an empty request body.
Reset password

View File

@@ -16,7 +16,7 @@
print("I am a fish %s" %
"moo")
and this::
and this::
print(
"I am a fish %s" %

View File

@@ -1,160 +0,0 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================
Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.
There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.
Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.
Collecting policy agreement from a user
---------------------------------------
Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.
To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.
These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.
Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.
The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.
Once the templates are in place, make the following changes to `homeserver.yaml`:
1. Add a `user_consent` section, which should look like:
```yaml
user_consent:
template_dir: privacy_policy_templates
version: 1.0
```
`template_dir` points to the directory containing the policy
templates. `version` defines the version of the policy which will be served
to the user. In the example above, Synapse will serve
`privacy_policy_templates/en/1.0.html`.
2. Add a `form_secret` setting at the top level:
```yaml
form_secret: "<unique secret>"
```
This should be set to an arbitrary secret string (try `pwgen -y 30` to
generate suitable secrets).
More on what this is used for below.
3. Add `consent` wherever the `client` resource is currently enabled in the
`listeners` configuration. For example:
```yaml
listeners:
- port: 8008
resources:
- names:
- client
- consent
```
Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On Debian, try `apt-get install
python-jinja2`.
Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.
### Constructing the consent URI
It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:
* `u`: the user id of the user. This can either be a full MXID
(`@user:server.com`) or just the localpart (`user`).
* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
possible to calculate this on the commandline with something like:
```bash
echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
```
This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
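For illustration, the same value can be computed in Python; `form_secret` and
the user id below are placeholders:

```python
import hashlib
import hmac

form_secret = "<form_secret>"
user = "user"  # or a full MXID such as "@user:server.com"

# Hex digest of the HMAC-SHA256 of the user id, keyed on form_secret --
# equivalent to the openssl invocation above.
h = hmac.new(
    key=form_secret.encode('utf8'),
    msg=user.encode('utf8'),
    digestmod=hashlib.sha256,
).hexdigest()

print("https://<server>/_matrix/consent?u=%s&h=%s" % (user, h))
```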
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------
It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:
* ensure that the consent resource is configured, as in the previous section
* ensure that server notices are configured, as in [server_notices.md](server_notices.md).
* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
example:
```yaml
user_consent:
server_notice_content:
msgtype: m.text
body: >-
Please give your consent to the privacy policy at %(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user (see the sketch just after this list).
* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the server notice.)
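As a minimal sketch of that substitution (the URI here is illustrative):

```python
# Illustration only: the %(consent_uri)s placeholder behaves like Python
# %-style string interpolation, keyed on "consent_uri".
body = "Please give your consent to the privacy policy at %(consent_uri)s."
print(body % {"consent_uri": "https://server/_matrix/consent?u=user&h=68a1..."})
```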
Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------
Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this).
To enable this, add `block_events_error` under `user_consent`. For example:
```yaml
user_consent:
block_events_error: >-
You can't send any messages until you consent to the privacy policy at
%(consent_uri)s.
```
Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.
As above, ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)

View File

@@ -279,9 +279,9 @@ Obviously that option means that the operations done in
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).
The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
The second option is to use ``logcontext.preserve_fn``, which wraps a function
so that it doesn't reset the logcontext even when it returns an incomplete
deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
@@ -293,7 +293,7 @@ It can be used like this:
def do_request_handling():
yield foreground_operation()
logcontext.run_in_background(background_operation)
logcontext.preserve_fn(background_operation)()
# this will now be logged against the request context
logger.debug("Request handling complete")

View File

@@ -1,43 +0,0 @@
Using the synapse manhole
=========================
The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:
```yaml
listeners:
- port: 9000
bind_addresses: ['::1', '127.0.0.1']
type: manhole
```
(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).
Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:
```bash
ssh -p9000 matrix@localhost
```
The password is `rabbithole`.
This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.
As a simple example, retrieving an event from the database:
```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```

View File

@@ -1,47 +1,25 @@
How to monitor Synapse metrics using Prometheus
===============================================
1. Install Prometheus:
1. Install prometheus:
Follow instructions at http://prometheus.io/docs/introduction/install/
2. Enable Synapse metrics:
2. Enable synapse metrics:
There are two methods of enabling metrics in Synapse.
Simply setting a (local) port number will enable it. Pick a port.
Prometheus itself defaults to 9090, so starting just above that for
locally monitored services seems reasonable. E.g. 9092:
The first serves the metrics as a part of the usual web server and can be
enabled by adding the "metrics" resource to the existing listener as such::
Add to homeserver.yaml::
resources:
- names:
- client
- metrics
metrics_port: 9092
This provides a simple way of adding metrics to your Synapse installation,
and serves under ``/_synapse/metrics``. If you do not wish your metrics to be
publicly exposed, you will need to either filter them out at your load
balancer, or use the second method.
Also ensure that ``enable_metrics`` is set to ``True``.
The second method runs the metrics server on a different port, in a
different thread to Synapse. This can make it more resilient under heavy
load that would otherwise prevent metrics from being retrieved, and makes
it easier to expose the metrics server to internal networks only. The
served metrics are available over HTTP only, and will be available at ``/``.
Restart synapse.
Add a new listener to homeserver.yaml::
listeners:
- type: metrics
port: 9000
bind_addresses:
- '0.0.0.0'
For both options, you will need to ensure that ``enable_metrics`` is set to
``True``.
Restart Synapse.
3. Add a Prometheus target for Synapse.
3. Add a prometheus target for synapse.
It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
@@ -53,50 +31,7 @@ How to monitor Synapse metrics using Prometheus
If your prometheus is older than 1.5.2, you will need to replace
``static_configs`` in the above with ``target_groups``.
Restart Prometheus.
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
The duplicated metrics deprecated in Synapse 0.27.0 have been removed.
All time-duration-based metrics have been changed to report seconds. This affects:
+----------------------------------+
| msec -> sec metrics |
+==================================+
| python_gc_time |
+----------------------------------+
| python_twisted_reactor_tick_time |
+----------------------------------+
| synapse_storage_query_time |
+----------------------------------+
| synapse_storage_schedule_time |
+----------------------------------+
| synapse_storage_transaction_time |
+----------------------------------+
Several metrics have been changed to be histograms, which sort entries into
buckets and allow better analysis. The following metrics are now histograms:
+-------------------------------------------+
| Altered metrics |
+===========================================+
| python_gc_time |
+-------------------------------------------+
| python_twisted_reactor_pending_calls |
+-------------------------------------------+
| python_twisted_reactor_tick_time |
+-------------------------------------------+
| synapse_http_server_response_time_seconds |
+-------------------------------------------+
| synapse_storage_query_time |
+-------------------------------------------+
| synapse_storage_schedule_time |
+-------------------------------------------+
| synapse_storage_transaction_time |
+-------------------------------------------+
Restart prometheus.
Block and response metrics renamed for 0.27.0

View File

@@ -6,22 +6,16 @@ Postgres version 9.4 or later is known to work.
Set up database
===============
Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::
su - postgres
createuser --pwprompt synapse_user
The PostgreSQL database used *must* have the correct encoding set, otherwise it
The PostgreSQL database used *must* have the correct encoding set, otherwise
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::
CREATE DATABASE synapse
ENCODING 'UTF8'
LC_COLLATE='C'
LC_CTYPE='C'
template=template0
OWNER synapse_user;
CREATE DATABASE synapse
ENCODING 'UTF8'
LC_COLLATE='C'
LC_CTYPE='C'
template=template0
OWNER synapse_user;
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
@@ -52,8 +46,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector
Synapse config
==============
When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::
When you are ready to start using PostgreSQL, add the following line to your
config file::
database:
name: psycopg2
@@ -102,12 +96,9 @@ complete, restart synapse. For instance::
cp homeserver.db homeserver.db.snapshot
./synctl start
Copy the old config file into a new config file::
cp homeserver.yaml homeserver-postgres.yaml
Edit the database section as described in the section *Synapse config* above
and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::
Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::
synapse_port_db --sqlite-database homeserver.db.snapshot \
--postgres-config homeserver-postgres.yaml
@@ -126,11 +117,6 @@ run::
--postgres-config homeserver-postgres.yaml
Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::
./synctl stop
mv homeserver.yaml homeserver-old-sqlite.yaml
mv homeserver-postgres.yaml homeserver.yaml
./synctl start
Synapse should now be running against PostgreSQL.
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
PostgreSQL.

View File

@@ -1,23 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
{% if has_consented %}
<p>
Your base already belong to us.
</p>
{% else %}
<p>
All your base are belong to us.
</p>
<form method="post" action="consent">
<input type="hidden" name="v" value="{{version}}"/>
<input type="hidden" name="u" value="{{user}}"/>
<input type="hidden" name="h" value="{{userhmac}}"/>
<input type="submit" value="Sure thing!"/>
</form>
{% endif %}
</body>
</html>

View File

@@ -1,11 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
<p>
Sweet.
</p>
</body>
</html>

View File

@@ -1,74 +0,0 @@
Server Notices
==============
'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
They are used as part of communicating the server policies (see
[consent_tracking.md](consent_tracking.md)), though the intention is that
they may also find use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.
User experience
---------------
When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.
Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).
The user is prevented from sending any messages in this room by the power
levels.
Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.
Synapse configuration
---------------------
Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.
In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:
```yaml
server_notices:
system_mxid_localpart: server
system_mxid_display_name: "Server Notices"
system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
room_name: "Server Notices"
```
The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.
`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.
Sending notices
---------------
As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).
In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:
```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```

View File

@@ -30,37 +30,20 @@ requests made to the federation port. The caveats regarding running a
reverse-proxy on the federation port still apply (see
https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
To enable workers, you need to add two replication listeners to the master
synapse, e.g.::
To enable workers, you need to add a replication listener to the master synapse, e.g.::
listeners:
# The TCP replication port
- port: 9092
bind_address: '127.0.0.1'
type: replication
# The HTTP replication port
- port: 9093
bind_address: '127.0.0.1'
type: http
resources:
- names: [replication]
Under **no circumstances** should these replication API listeners be exposed to
the public internet; it currently implements no authentication whatsoever and is
Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted.
(Roughly, the TCP port is used for streaming data from the master to the
workers, and the HTTP port for the workers to send data to the main
synapse process.)
You then create a set of configs for the various worker processes. These
should be worker configuration files, and should be stored in a dedicated
subdirectory, to allow synctl to manipulate them. An additional configuration
for the master synapse process will need to be created because the process will
not be started automatically. That configuration should look like this::
worker_app: synapse.app.homeserver
daemonize: true
subdirectory, to allow synctl to manipulate them.
Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
@@ -69,13 +52,8 @@ You should minimise the number of overrides though to maintain a usable config.
You must specify the type of worker application (``worker_app``). The currently
available worker applications are listed below. You must also specify the
replication endpoints that it's talking to on the main synapse process.
``worker_replication_host`` should specify the host of the main synapse,
``worker_replication_port`` should point to the TCP replication listener port and
``worker_replication_http_port`` should point to the HTTP replication port.
Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
``worker_replication_http_port``.
replication endpoint that it's talking to on the main synapse process
(``worker_replication_host`` and ``worker_replication_port``).
For instance::
@@ -84,7 +62,6 @@ For instance::
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_port: 9092
worker_replication_http_port: 9093
worker_listeners:
- type: http
@@ -173,23 +150,10 @@ endpoints matching the following regular expressions::
^/_matrix/federation/v1/backfill/
^/_matrix/federation/v1/get_missing_events/
^/_matrix/federation/v1/publicRooms
^/_matrix/federation/v1/query/
^/_matrix/federation/v1/make_join/
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/v1/send_join/
^/_matrix/federation/v1/send_leave/
^/_matrix/federation/v1/invite/
^/_matrix/federation/v1/query_auth/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/send/
The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.
The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.
``synapse.app.federation_sender``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -219,10 +183,6 @@ Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -241,31 +201,9 @@ regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/keys/upload
If ``use_presence`` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status
This "stub" presence handler will pass through ``GET`` request but make the
``PUT`` effectively a no-op.
It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
file. For example::
worker_main_http_uri: http://127.0.0.1:8008
``synapse.app.event_creator``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handles some event creation. It can handle REST endpoints matching::
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|unstable)/join/
^/_matrix/client/(api/v1|r0|unstable)/profile/
It will create events locally and then send them on to the main synapse
instance to be persisted and handled.

View File

@@ -1,7 +1,5 @@
#! /bin/bash
set -eux
cd "`dirname $0`/.."
TOX_DIR=$WORKSPACE/.tox
@@ -16,20 +14,7 @@ fi
tox -e py27 --notest -v
TOX_BIN=$TOX_DIR/py27/bin
# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'
# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
echo lxml
echo lxml psycopg2
} | xargs $TOX_BIN/pip install

View File

@@ -1,30 +0,0 @@
[tool.towncrier]
package = "synapse"
filename = "CHANGES.md"
directory = "changelog.d"
issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"
[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true
[[tool.towncrier.type]]
directory = "bugfix"
name = "Bugfixes"
showcontent = true
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true
[[tool.towncrier.type]]
directory = "removal"
name = "Deprecations and Removals"
showcontent = true
[[tool.towncrier.type]]
directory = "misc"
name = "Internal Changes"
showcontent = true

View File

@@ -21,4 +21,4 @@ try:
verifier.verify(macaroon, key)
print "Signature is correct"
except Exception as e:
print str(e)
print e.message

View File

@@ -18,22 +18,14 @@
from __future__ import print_function
import argparse
from urlparse import urlparse, urlunparse
import nacl.signing
import json
import base64
import requests
import sys
from requests.adapters import HTTPAdapter
import srvlookup
import yaml
# uncomment the following to enable debug logging of http requests
#from httplib import HTTPConnection
#HTTPConnection.debuglevel = 1
def encode_base64(input_bytes):
"""Encode bytes as a base64 string without any padding."""
@@ -121,6 +113,17 @@ def read_signing_keys(stream):
return keys
def lookup(destination, path):
if ":" in destination:
return "https://%s%s" % (destination, path)
else:
try:
srv = srvlookup.lookup("matrix", "tcp", destination)[0]
return "https://%s:%d%s" % (srv.host, srv.port, path)
except:
return "https://%s:%d%s" % (destination, 8448, path)
def request_json(method, origin_name, origin_key, destination, path, content):
if method is None:
if content is None:
@@ -149,19 +152,13 @@ def request_json(method, origin_name, origin_key, destination, path, content):
authorization_headers.append(bytes(header))
print ("Authorization: %s" % header, file=sys.stderr)
dest = "matrix://%s%s" % (destination, path)
dest = lookup(destination, path)
print ("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
s.mount("matrix://", MatrixConnectionAdapter())
result = s.request(
result = requests.request(
method=method,
url=dest,
headers={
"Host": destination,
"Authorization": authorization_headers[0]
},
headers={"Authorization": authorization_headers[0]},
verify=False,
data=content,
)
@@ -245,39 +242,5 @@ def read_args_from_config(args):
args.signing_key_path = config['signing_key_path']
class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod
def lookup(s):
if s[-1] == ']':
# ipv6 literal (with no port)
return s, 8448
if ":" in s:
out = s.rsplit(":",1)
try:
port = int(out[1])
except ValueError:
raise ValueError("Invalid host:port '%s'" % s)
return out[0], port
try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
except:
return s, 8448
def get_connection(self, url, proxies=None):
parsed = urlparse(url)
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlunparse((
"https", netloc, parsed.path, parsed.params, parsed.query,
parsed.fragment,
))
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
if __name__ == "__main__":
main()

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -e
# Fetch the current GitHub issue number, add one to it -- presto! The likely
# next PR number.
CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
CURRENT_NUMBER=$((CURRENT_NUMBER+1))
echo $CURRENT_NUMBER

View File

@@ -0,0 +1,47 @@
#!/bin/bash
## CAUTION:
## This script will remove (hopefully) all trace of the given room ID from
## your homeserver.db
## Do not run it lightly.
ROOMID="$1"
sqlite3 homeserver.db <<EOF
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_edges WHERE room_id = '$ROOMID';
DELETE FROM room_depth WHERE room_id = '$ROOMID';
DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM events WHERE room_id = '$ROOMID';
DELETE FROM event_json WHERE room_id = '$ROOMID';
DELETE FROM state_events WHERE room_id = '$ROOMID';
DELETE FROM current_state_events WHERE room_id = '$ROOMID';
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
DELETE FROM feedback WHERE room_id = '$ROOMID';
DELETE FROM topics WHERE room_id = '$ROOMID';
DELETE FROM room_names WHERE room_id = '$ROOMID';
DELETE FROM rooms WHERE room_id = '$ROOMID';
DELETE FROM room_hosts WHERE room_id = '$ROOMID';
DELETE FROM room_aliases WHERE room_id = '$ROOMID';
DELETE FROM state_groups WHERE room_id = '$ROOMID';
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
DELETE FROM guest_access WHERE room_id = '$ROOMID';
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
DELETE FROM room_tags WHERE room_id = '$ROOMID';
DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
DELETE FROM room_account_data WHERE room_id = '$ROOMID';
DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
DELETE FROM local_invites WHERE room_id = '$ROOMID';
DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
DELETE FROM event_reports WHERE room_id = '$ROOMID';
DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
DELETE FROM event_auth WHERE room_id = '$ROOMID';
DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
VACUUM;
EOF

View File

@@ -26,37 +26,11 @@ import yaml
def request_registration(user, password, server_location, shared_secret, admin=False):
req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
headers={'Content-Type': 'application/json'}
)
try:
if sys.version_info[:3] >= (2, 7, 9):
# As of version 2.7.9, urllib2 now checks SSL certs
import ssl
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
else:
f = urllib2.urlopen(req)
body = f.read()
f.close()
nonce = json.loads(body)["nonce"]
except urllib2.HTTPError as e:
print "ERROR! Received %d %s" % (e.code, e.reason,)
if 400 <= e.code < 500:
if e.info().type == "application/json":
resp = json.load(e)
if "error" in resp:
print resp["error"]
sys.exit(1)
mac = hmac.new(
key=shared_secret,
digestmod=hashlib.sha1,
)
mac.update(nonce)
mac.update("\x00")
mac.update(user)
mac.update("\x00")
mac.update(password)
@@ -66,10 +40,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
mac = mac.hexdigest()
data = {
"nonce": nonce,
"username": user,
"user": user,
"password": password,
"mac": mac,
"type": "org.matrix.login.shared_secret",
"admin": admin,
}
@@ -78,7 +52,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
print "Sending registration request..."
req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
"%s/_matrix/client/api/v1/register" % (server_location,),
data=json.dumps(data),
headers={'Content-Type': 'application/json'}
)
@@ -133,7 +107,7 @@ def register_new_user(user, password, server_location, shared_secret, admin):
print "Passwords do not match"
sys.exit(1)
if admin is None:
if not admin:
admin = raw_input("Make admin [no]: ")
if admin in ("y", "yes", "true"):
admin = True
@@ -160,16 +134,10 @@ if __name__ == "__main__":
default=None,
help="New password for user. Will prompt if omitted.",
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
parser.add_argument(
"-a", "--admin",
action="store_true",
help="Register new user as an admin. Will prompt if --no-admin is not set either.",
)
admin_group.add_argument(
"--no-admin",
action="store_true",
help="Register new user as a regular user. Will prompt if --admin is not set either.",
help="Register new user as an admin. Will prompt if omitted.",
)
group = parser.add_mutually_exclusive_group(required=True)
@@ -203,8 +171,4 @@ if __name__ == "__main__":
else:
secret = args.shared_secret
admin = None
if args.admin or args.no_admin:
admin = args.admin
register_new_user(args.user, args.password, args.server_url, secret, admin)
register_new_user(args.user, args.password, args.server_url, secret, args.admin)

View File

@@ -1,7 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,8 +29,6 @@ import time
import traceback
import yaml
from six import string_types
logger = logging.getLogger("synapse_port_db")
@@ -253,12 +250,6 @@ class Porter(object):
@defer.inlineCallbacks
def handle_table(self, table, postgres_size, table_size, forward_chunk,
backward_chunk):
logger.info(
"Table %s: %i/%i (rows %i-%i) already ported",
table, postgres_size, table_size,
backward_chunk+1, forward_chunk-1,
)
if not table_size:
return
@@ -476,10 +467,31 @@ class Porter(object):
self.progress.set_state("Preparing PostgreSQL")
self.setup_db(postgres_config, postgres_engine)
self.progress.set_state("Creating port tables")
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
table="sqlite_master",
keyvalues={
"type": "table",
},
retcol="name",
)
postgres_tables = yield self.postgres_store._simple_select_onecol(
table="information_schema.tables",
keyvalues={},
retcol="distinct table_name",
)
tables = set(sqlite_tables) & set(postgres_tables)
self.progress.set_state("Creating tables")
logger.info("Found %d tables", len(tables))
def create_port_table(txn):
txn.execute(
"CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
"CREATE TABLE port_from_sqlite3 ("
" table_name varchar(100) NOT NULL UNIQUE,"
" forward_rowid bigint NOT NULL,"
" backward_rowid bigint NOT NULL"
@@ -505,33 +517,18 @@ class Porter(object):
"alter_table", alter_table
)
except Exception as e:
pass
logger.info("Failed to create port table: %s", e)
yield self.postgres_store.runInteraction(
"create_port_table", create_port_table
)
try:
yield self.postgres_store.runInteraction(
"create_port_table", create_port_table
)
except Exception as e:
logger.info("Failed to create port table: %s", e)
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
table="sqlite_master",
keyvalues={
"type": "table",
},
retcol="name",
)
self.progress.set_state("Setting up")
postgres_tables = yield self.postgres_store._simple_select_onecol(
table="information_schema.tables",
keyvalues={},
retcol="distinct table_name",
)
tables = set(sqlite_tables) & set(postgres_tables)
logger.info("Found %d tables", len(tables))
# Step 3. Figure out what still needs copying
self.progress.set_state("Checking on port progress")
# Set up tables.
setup_res = yield defer.gatherResults(
[
self.setup_table(table)
@@ -542,8 +539,7 @@ class Porter(object):
consumeErrors=True,
)
# Step 4. Do the copying.
self.progress.set_state("Copying to postgres")
# Process tables.
yield defer.gatherResults(
[
self.handle_table(*res)
@@ -552,9 +548,6 @@ class Porter(object):
consumeErrors=True,
)
# Step 5. Do final post-processing
yield self._setup_state_group_id_seq()
self.progress.done()
except:
global end_error_exec_info
@@ -576,7 +569,7 @@ class Porter(object):
def conv(j, col):
if j in bool_cols:
return bool(col)
elif isinstance(col, string_types) and "\0" in col:
elif isinstance(col, basestring) and "\0" in col:
logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
raise BadValueException();
return col
@@ -714,16 +707,6 @@ class Porter(object):
defer.returnValue((done, remaining + done))
def _setup_state_group_id_seq(self):
def r(txn):
txn.execute("SELECT MAX(id) FROM state_groups")
next_id = txn.fetchone()[0]+1
txn.execute(
"ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
(next_id,),
)
return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
##############################################
###### The following is simply UI stuff ######

View File

@@ -14,27 +14,7 @@ ignore =
pylint.cfg
tox.ini
[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik
# doesn't like it. E203 is contrary to PEP8. E731 is silly.
ignore = W503,E203,E731
[flake8]
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90
ignore=W503,E203,E731
[isort]
line_length = 89
not_skip = __init__.py
sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
default_section=THIRDPARTY
known_first_party = synapse
known_tests=tests
known_compat = mock,six
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
combine_as_imports=true
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,14 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory
from twisted.names.dns import DNSDatagramProtocol
protocol.Factory.noisy = False
Factory.noisy = False
DNSDatagramProtocol.noisy = False
except ImportError:
pass
__version__ = "0.33.5.1"
__version__ = "0.26.0"

View File

@@ -15,20 +15,15 @@
import logging
from six import itervalues
import pymacaroons
from netaddr import IPAddress
from twisted.internet import defer
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure
@@ -62,17 +57,16 @@ class Auth(object):
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
register_cache("cache", "token_cache", self.token_cache)
register_cache("token_cache", self.token_cache)
@defer.inlineCallbacks
def check_from_context(self, event, context, do_sig_check=True):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.compute_auth_events(
event, prev_state_ids, for_verification=True,
event, context.prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
(e.type, e.state_key): e for e in itervalues(auth_events)
(e.type, e.state_key): e for e in auth_events.values()
}
self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
@@ -195,7 +189,7 @@ class Auth(object):
synapse.types.create_requester(user_id, app_service=app_service)
)
access_token = self.get_access_token_from_request(
access_token = get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
@@ -210,11 +204,11 @@ class Auth(object):
ip_addr = self.hs.get_ip_from_request(request)
user_agent = request.requestHeaders.getRawHeaders(
b"User-Agent",
default=[b""]
)[0].decode('ascii', 'surrogateescape')
"User-Agent",
default=[""]
)[0]
if user and access_token and ip_addr:
yield self.store.insert_client_ip(
self.store.insert_client_ip(
user_id=user.to_string(),
access_token=access_token,
ip=ip_addr,
@@ -241,22 +235,17 @@ class Auth(object):
@defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(
get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
)
if app_service is None:
defer.returnValue((None, None))
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
defer.returnValue((None, None))
if b"user_id" not in request.args:
if "user_id" not in request.args:
defer.returnValue((app_service.sender, app_service))
user_id = request.args[b"user_id"][0].decode('utf8')
user_id = request.args["user_id"][0]
if app_service.sender == user_id:
defer.returnValue((app_service.sender, app_service))
@@ -497,7 +486,7 @@ class Auth(object):
def _look_up_user_by_access_token(self, token):
ret = yield self.store.get_user_by_access_token(token)
if not ret:
logger.warn("Unrecognised access token - not in store.")
logger.warn("Unrecognised access token - not in store: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
@@ -515,12 +504,12 @@ class Auth(object):
def get_appservice_by_req(self, request):
try:
token = self.get_access_token_from_request(
token = get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token.")
logger.warn("Unrecognised appservice access token: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
@@ -546,8 +535,7 @@ class Auth(object):
@defer.inlineCallbacks
def add_auth_events(self, builder, context):
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)
auth_events_entries = yield self.store.add_event_hashes(
auth_ids
@@ -665,7 +653,7 @@ class Auth(object):
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
send_level = event_auth.get_send_level(
EventTypes.Aliases, "", power_level_event,
EventTypes.Aliases, "", auth_events
)
user_level = event_auth.get_user_power_level(user_id, auth_events)
@@ -676,156 +664,67 @@ class Auth(object):
" edit its room list entry"
)
@staticmethod
def has_access_token(request):
"""Checks if the request has an access_token.
Returns:
bool: False if no access_token was given, True otherwise.
"""
query_params = request.args.get(b"access_token")
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
return bool(query_params) or bool(auth_headers)
def has_access_token(request):
"""Checks if the request has an access_token.
@staticmethod
def get_access_token_from_request(request, token_not_found_http_status=401):
"""Extracts the access_token from the request.
Returns:
bool: False if no access_token was given, True otherwise.
"""
query_params = request.args.get("access_token")
auth_headers = request.requestHeaders.getRawHeaders("Authorization")
return bool(query_params) or bool(auth_headers)
Args:
request: The http request.
token_not_found_http_status(int): The HTTP status code to set in the
AuthError if the token isn't found. This is used in some of the
legacy APIs to change the status code to 403 from the default of
401 since some of the old clients depended on auth errors returning
403.
Returns:
unicode: The access_token
Raises:
AuthError: If there isn't an access_token in the request.
"""
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
query_params = request.args.get(b"access_token")
if auth_headers:
# Try the get the access_token from a "Authorization: Bearer"
# header
if query_params is not None:
raise AuthError(
token_not_found_http_status,
"Mixing Authorization headers and access_token query parameters.",
errcode=Codes.MISSING_TOKEN,
)
if len(auth_headers) > 1:
raise AuthError(
token_not_found_http_status,
"Too many Authorization headers.",
errcode=Codes.MISSING_TOKEN,
)
parts = auth_headers[0].split(b" ")
if parts[0] == b"Bearer" and len(parts) == 2:
return parts[1].decode('ascii')
else:
raise AuthError(
token_not_found_http_status,
"Invalid Authorization header.",
errcode=Codes.MISSING_TOKEN,
)
else:
# Try to get the access_token from the query params.
if not query_params:
raise AuthError(
token_not_found_http_status,
"Missing access token.",
errcode=Codes.MISSING_TOKEN
)
def get_access_token_from_request(request, token_not_found_http_status=401):
"""Extracts the access_token from the request.
return query_params[0].decode('ascii')
Args:
request: The http request.
token_not_found_http_status(int): The HTTP status code to set in the
AuthError if the token isn't found. This is used in some of the
legacy APIs to change the status code to 403 from the default of
401 since some of the old clients depended on auth errors returning
403.
Returns:
str: The access_token
Raises:
AuthError: If there isn't an access_token in the request.
"""
@defer.inlineCallbacks
def check_in_room_or_world_readable(self, room_id, user_id):
"""Checks that the user is or was in the room or the room is world
readable. If it isn't then an exception is raised.
Returns:
Deferred[tuple[str, str|None]]: Resolves to the current membership of
the user in the room and the membership event ID of the user. If
the user is not in the room and never has been, then
`(Membership.JOIN, None)` is returned.
"""
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
except AuthError:
visibility = yield self.state.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility and
visibility.content["history_visibility"] == "world_readable"
):
defer.returnValue((Membership.JOIN, None))
return
auth_headers = request.requestHeaders.getRawHeaders("Authorization")
query_params = request.args.get("access_token")
if auth_headers:
# Try the get the access_token from a "Authorization: Bearer"
# header
if query_params is not None:
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
token_not_found_http_status,
"Mixing Authorization headers and access_token query parameters.",
errcode=Codes.MISSING_TOKEN,
)
if len(auth_headers) > 1:
raise AuthError(
token_not_found_http_status,
"Too many Authorization headers.",
errcode=Codes.MISSING_TOKEN,
)
parts = auth_headers[0].split(" ")
if parts[0] == "Bearer" and len(parts) == 2:
return parts[1]
else:
raise AuthError(
token_not_found_http_status,
"Invalid Authorization header.",
errcode=Codes.MISSING_TOKEN,
)
else:
# Try to get the access_token from the query params.
if not query_params:
raise AuthError(
token_not_found_http_status,
"Missing access token.",
errcode=Codes.MISSING_TOKEN
)
@defer.inlineCallbacks
def check_auth_blocking(self, user_id=None, threepid=None):
"""Checks if the user should be rejected for some external reason,
such as monthly active user limiting or global disable flag
Args:
user_id(str|None): If present, checks for presence against existing
MAU cohort
threepid(dict|None): If present, checks for presence against configured
reserved threepid. Used in cases where the user is trying register
with a MAU blocked server, normally they would be rejected but their
threepid is on the reserved list. user_id and
threepid should never be set at the same time.
"""
# Never fail an auth check for the server notices users
# This can be a problem where event creation is prohibited due to blocking
if user_id == self.hs.config.server_notices_mxid:
return
if self.hs.config.hs_disabled:
raise ResourceLimitError(
403, self.hs.config.hs_disabled_message,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
admin_contact=self.hs.config.admin_contact,
limit_type=self.hs.config.hs_disabled_limit_type
)
if self.hs.config.limit_usage_by_mau is True:
assert not (user_id and threepid)
# If the user is already part of the MAU cohort or a trial user
if user_id:
timestamp = yield self.store.user_last_seen_monthly_active(user_id)
if timestamp:
return
is_trial = yield self.store.is_trial_user(user_id)
if is_trial:
return
elif threepid:
# If the user does not exist yet, but is signing up with a
# reserved threepid then pass auth check
if is_threepid_reserved(self.hs.config, threepid):
return
# Else if there is no room in the MAU bucket, bail
current_mau = yield self.store.get_monthly_active_count()
if current_mau >= self.hs.config.max_mau_value:
raise ResourceLimitError(
403, "Monthly Active User Limit Exceeded",
admin_contact=self.hs.config.admin_contact,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
limit_type="monthly_active_user"
)
return query_params[0]

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,9 +16,6 @@
"""Contains constants from the specification."""
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
class Membership(object):
@@ -77,9 +73,6 @@ class EventTypes(object):
Topic = "m.room.topic"
Name = "m.room.name"
ServerACL = "m.room.server_acl"
Pinned = "m.room.pinned_events"
class RejectedReason(object):
AUTH_ERROR = "auth_error"
@@ -96,19 +89,3 @@ class RoomCreationPreset(object):
class ThirdPartyEntityKind(object):
USER = "user"
LOCATION = "location"
class RoomVersions(object):
V1 = "1"
VDH_TEST = "vdh-test-version"
# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1
# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"

Some files were not shown because too many files have changed in this diff.