Compare commits
5 Commits
hhs-7 ... matthew/st

| Author | SHA1 | Date |
|---|---|---|
| | 03bdbb8c6b | |
| | 18752982db | |
| | 6dacdd5fbe | |
| | c82785f5cb | |
| | a34061d332 | |
.circleci/config.yml

@@ -1,114 +0,0 @@
version: 2
jobs:
  sytestpy2:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy2postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy3:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2
      - sytestpy2postgres
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master/
      # Currently broken while the Python 3 port is incomplete
      # - sytestpy3
      # - sytestpy3postgres
.circleci/merge_base_branch.sh

@@ -1,27 +0,0 @@
#!/usr/bin/env bash

set -e

# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> "$BASH_ENV"
source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is!"
    exit 1
fi

# Get the reference, using the GitHub API
GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`

# Show what we are before
git show -s

# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE

# Show what we are after.
git show -s
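The `${CIRCLE_PULL_REQUEST##*/}` expansion in the script above strips everything up to the last slash of the pull-request URL. A Python equivalent (the URL is an illustrative value) shows the intent:

```python
# Equivalent of the ${CIRCLE_PULL_REQUEST##*/} expansion in the script above.
pr_url = "https://github.com/matrix-org/synapse/pull/3995"  # illustrative value
pr_number = pr_url.rsplit("/", 1)[-1]
assert pr_number == "3995"
```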
.dockerignore

@@ -3,5 +3,3 @@ Dockerfile
.gitignore
demo/etc
tox.ini
.git/*
.tox/*
5 .github/ISSUE_TEMPLATE.md (vendored)
@@ -27,9 +27,8 @@ Describe here the problem that you are experiencing, or the feature you are requ

Describe how what happens differs from what you expected.

<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly. -->
If you can identify any relevant log snippets from _homeserver.log_, please include
those here (please be careful to remove any personal or private data):

### Version information
1 .gitignore (vendored)
@@ -44,7 +44,6 @@ media_store/
build/
venv/
venv*/
*venv/

localhost-800*/
static/client/register/register_config.js
.travis.yml

@@ -20,17 +20,9 @@ matrix:
  - python: 2.7
    env: TOX_ENV=py27

  - python: 2.7
    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
    services:
      - postgresql

  - python: 3.6
    env: TOX_ENV=py36

  - python: 3.6
    env: TOX_ENV=check_isort

  - python: 3.6
    env: TOX_ENV=check-newsfragment
AUTHORS.rst

@@ -62,7 +62,4 @@ Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication

Pierre Jaury <pierre at jaury.eu>
* Docker packaging

Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix
* Docker packaging
2708 CHANGES.md
File diff suppressed because it is too large.

2839 CHANGES.rst (new file)
File diff suppressed because it is too large.
CONTRIBUTING.rst

@@ -30,11 +30,11 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `Jenkins <http://matrix.org/jenkins>`_ and
`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
integration. All pull requests to synapse get automatically tested by Travis;
the Jenkins builds require an administrator to start them. If your change
breaks the build, this will be shown in github, so please keep an eye on the
pull request for feedback.

Code style

@@ -51,23 +51,22 @@ makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~

All changes, even minor ones, need a corresponding changelog / newsfragment
All changes, even minor ones, need a corresponding changelog
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).

To create a changelog entry, make a new file in the ``changelog.d``
file named in the format of ``PRnumber.type``. The type can be
file named in the format of ``issuenumberOrPR.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
the file is your changelog entry, which can contain RestructuredText
formatting. A note of contributors is welcomed in changelogs for
non-misc changes (the content of misc changes is not displayed).

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
For example, a fix for a bug reported in #1234 would have its
changelog entry in ``changelog.d/1234.bugfix``, and contain content
like "The security levels of Florbs are now validated when
received over federation. Contributed by Jane Matrix".
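As a concrete illustration of the convention described above (hypothetical PR number; a plain-Python sketch rather than any project tooling):

```python
# Hypothetical example: create the changelog entry for PR/issue 1234.
# Towncrier identifies the entry purely by the file name.
from pathlib import Path

Path("changelog.d/1234.bugfix").write_text(
    "The security levels of Florbs are now validated when received "
    "over federation. Contributed by Jane Matrix.\n"
)
```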
Attribution
~~~~~~~~~~~

@@ -126,7 +125,7 @@ the contribution or otherwise have the right to contribute it to Matrix::
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
19 Dockerfile (new file)
@@ -0,0 +1,19 @@
FROM docker.io/python:2-alpine3.7

RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev libxslt-dev

COPY . /synapse

# A wheel cache may be provided in ./cache for faster build
RUN cd /synapse \
    && pip install --upgrade pip setuptools psycopg2 lxml \
    && mkdir -p /synapse/cache \
    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
    && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
    && rm -rf setup.py setup.cfg synapse

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]
MANIFEST.in

@@ -2,7 +2,6 @@ include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py

@@ -35,5 +34,3 @@ recursive-include changelog.d *

prune .github
prune demo/etc
prune docker
prune .circleci
55 README.rst
@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

Thanks for using Matrix!

@@ -157,15 +157,13 @@ if you prefer.

In case of problems, please see the _`Troubleshooting` section below.

There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
this including configuration options is available in the README on
hub.docker.com.
There is an official synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker`. Further information on this including configuration options is available in `contrib/docker/README.md`.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/

Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.

Configuring synapse
-------------------

@@ -285,7 +283,7 @@ Connecting to Synapse from a client

The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
https://riot.im/app. You will need to specify a "Custom server" when you log on
http://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://domain.tld`` if you set up a reverse proxy
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity

@@ -331,7 +329,7 @@ Security Note
=============

Matrix serves raw user generated data in some APIs - specifically the `content
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.

Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running

@@ -350,7 +348,7 @@ Platform-Specific Instructions
Debian
------

Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

@@ -364,19 +362,6 @@ Synapse is in the Fedora repositories as ``matrix-synapse``::
Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse

OpenSUSE
--------

Synapse is in the OpenSUSE repositories as ``matrix-synapse``::

    sudo zypper install matrix-synapse

SUSE Linux Enterprise Server
----------------------------

Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/

ArchLinux
---------

@@ -539,7 +524,7 @@ Troubleshooting Running
-----------------------

If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and

@@ -687,8 +672,8 @@ useful just for development purposes. See `<demo/README>`_.
Using PostgreSQL
================

As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.

The advantages of Postgres include:

@@ -712,7 +697,7 @@ Using a reverse proxy with Synapse

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.

@@ -742,18 +727,6 @@ so an example nginx configuration might look like::
        }
    }

and an example apache configuration may look like::

    <VirtualHost *:443>
        SSLEngine on
        ServerName matrix.example.com;

        <Location /_matrix>
            ProxyPass http://127.0.0.1:8008/_matrix nocanon
            ProxyPassReverse http://127.0.0.1:8008/_matrix
        </Location>
    </VirtualHost>

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
1 changelog.d/3316.feature (new file)
@@ -0,0 +1 @@
Enforce the specified API for report_event

1 changelog.d/3496.feature (new file)
@@ -0,0 +1 @@
Include CPU time from database threads in request/block metrics.

1 changelog.d/3497.feature (new file)
@@ -0,0 +1 @@
Add CPU metrics for _fetch_event_list

0 changelog.d/3498.misc (new file)

0 changelog.d/3499.misc (new file)

0 changelog.d/3501.misc (new file)

1 changelog.d/3505.feature (new file)
@@ -0,0 +1 @@
Reduce database consumption when processing large numbers of receipts

1 changelog.d/3521.feature (new file)
@@ -0,0 +1 @@
Cache optimisation for /sync requests

1 changelog.d/3533.bugfix (new file)
@@ -0,0 +1 @@
Fix queued federation requests being processed in the wrong order

1 changelog.d/3534.misc (new file)
@@ -0,0 +1 @@
refactor: use parse_{string,integer} and assert's from http.servlet for deduplication

0 changelog.d/3535.misc (new file)
@@ -1 +0,0 @@
CircleCI tests now run on the potential merge of a PR.
@@ -1 +0,0 @@
http/ is now ported to Python 3.
@@ -1 +0,0 @@
Remove connection ID for replication prometheus metrics, as it creates a large number of new series.
@@ -1 +0,0 @@
Improve human readable error messages for threepid registration/account update
@@ -1 +0,0 @@
Implement `event_format` filter param in `/sync`
@@ -1 +0,0 @@
Make /sync slightly faster by avoiding needless copies
@@ -1 +0,0 @@
guest users should not be part of mau total
@@ -1 +0,0 @@
handlers/ is now ported to Python 3.
@@ -1 +0,0 @@
Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted.
@@ -1 +0,0 @@
Limit the number of PDUs/EDUs per federation transaction
@@ -1 +0,0 @@
Only start postgres instance for postgres tests on Travis CI
@@ -1 +0,0 @@
tests/ is now ported to Python 3.
@@ -1 +0,0 @@
Fix existing room tags not coming down sync when joining a room
@@ -1 +0,0 @@
crypto/ is now ported to Python 3.
@@ -1 +0,0 @@
rest/ is now ported to Python 3.
@@ -1 +0,0 @@
Fix jwt import check
@@ -1 +0,0 @@
add some logging for the keyring queue
@@ -1 +0,0 @@
speed up lazy loading by 2-3x
@@ -1 +0,0 @@
Improved Dockerfile to remove build requirements after building reducing the image size.
@@ -1 +0,0 @@
fix VOIP crashes under Python 3 (#3821)
@@ -1 +0,0 @@
Disable lazy loading for incremental syncs for now
@@ -1 +0,0 @@
Fix manhole so that it works with latest openssh clients
@@ -1 +0,0 @@
Fix outbound requests occasionally wedging, which can result in federation breaking between servers.
@@ -1 +0,0 @@
Add synapse_admin_mau:registered_reserved_users metric to expose number of real reserved users
@@ -1 +0,0 @@
federation/ is now ported to Python 3.
@@ -1 +0,0 @@
Show heroes if room name/canonical alias has been deleted
@@ -1 +0,0 @@
Log when we retry outbound requests
contrib/docker/README.md

@@ -1,5 +1,29 @@
# Synapse Docker

The `matrixdotorg/synapse` Docker image will run Synapse as a single process. It does not provide a
database server or a TURN server, you should run these separately.

If you run a Postgres server, you should simply include it in the same Compose
project or set the proper environment variables and the image will automatically
use that server.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse .
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant packages in the ``cache/`` directory at the root of the project.

## Run

This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including

@@ -36,6 +60,94 @@ Then, customize your configuration and run the server:
docker-compose up -d
```

### More information
### Without Compose

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, that will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
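To make the settings above concrete, here is a minimal sketch (illustrative only, not the image's actual start.py logic) of how such environment variables translate into homeserver options:

```python
# Illustrative sketch of environment-driven configuration; not the image's
# real start.py. Mandatory variables raise KeyError when unset.
import os

config = {
    "server_name": os.environ["SYNAPSE_SERVER_NAME"],             # mandatory
    "report_stats": os.environ["SYNAPSE_REPORT_STATS"] == "yes",  # mandatory
    "enable_registration": "SYNAPSE_ENABLE_REGISTRATION" in os.environ,
    "event_cache_size": os.environ.get("SYNAPSE_EVENT_CACHE_SIZE", "10K"),
    "cache_factor": float(os.environ.get("SYNAPSE_CACHE_FACTOR", "0.5")),
}
```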
contrib/docker/docker-compose.yml

@@ -6,7 +6,6 @@ version: '3'
services:

  synapse:
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
contrib/grafana/README.md

@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules
File diff suppressed because it is too large.
docker/Dockerfile

@@ -1,41 +0,0 @@
FROM docker.io/python:2-alpine3.8

COPY . /synapse

RUN apk add --no-cache --virtual .build_deps \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        zlib-dev \
    && cd /synapse \
    && apk add --no-cache --virtual .runtime_deps \
        libffi \
        libjpeg-turbo \
        libressl \
        libxslt \
        libpq \
        zlib \
        su-exec \
    && pip install --upgrade \
        lxml \
        pip \
        psycopg2 \
        setuptools \
    && mkdir -p /synapse/cache \
    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
    && mv /synapse/docker/start.py /synapse/docker/conf / \
    && rm -rf \
        setup.cfg \
        setup.py \
        synapse \
    && apk del .build_deps

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]
124 docker/README.md
@@ -1,124 +0,0 @@
# Synapse Docker

This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server, you should run these separately.

## Run

We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.

### Using docker-compose (easier)

This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.

An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker)
section of the synapse project for examples.

### Without Compose (harder)

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, that will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant
packages in the ``cache/`` directory at the root of the project.
docs/admin_api/register_api.rst

@@ -1,63 +0,0 @@
Shared-Secret Registration
==========================

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API::

    > GET /_matrix/client/r0/admin/register

    < {"nonce": "thisisanonce"}

Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content.

As an example::

    > POST /_matrix/client/r0/admin/register
    > {
        "nonce": "thisisanonce",
        "username": "pepper_roni",
        "password": "pizza",
        "admin": true,
        "mac": "mac_digest_here"
      }

    < {
        "access_token": "token_here",
        "user_id": "@pepper_roni:localhost",
        "home_server": "test",
        "device_id": "device_id_here"
      }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::

    import hmac, hashlib

    def generate_mac(nonce, user, password, admin=False):

        mac = hmac.new(
            key=shared_secret,
            digestmod=hashlib.sha1,
        )

        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(user.encode('utf8'))
        mac.update(b"\x00")
        mac.update(password.encode('utf8'))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")

        return mac.hexdigest()
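Tying the removed document's pieces together, here is a minimal Python 3 sketch of the full nonce-fetch, HMAC, and registration flow it describes; the server URL, shared secret, and credentials are illustrative assumptions:

```python
# Minimal sketch of the shared-secret registration flow described above.
# Assumes a homeserver on localhost:8008; all values are illustrative.
import hashlib
import hmac
import json
from urllib.request import Request, urlopen

server = "http://localhost:8008"
shared_secret = b"your-registration-shared-secret"  # from homeserver.yaml

# 1. Fetch a one-time nonce.
with urlopen(server + "/_matrix/client/r0/admin/register") as resp:
    nonce = json.load(resp)["nonce"]

# 2. HMAC-SHA1 over nonce, user, password and the admin flag, NUL-separated,
#    keyed with the shared secret (as specified above).
mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
mac.update(b"\x00".join([nonce.encode("utf8"), b"pepper_roni", b"pizza", b"admin"]))

# 3. POST the registration request.
body = json.dumps({
    "nonce": nonce,
    "username": "pepper_roni",
    "password": "pizza",
    "admin": True,
    "mac": mac.hexdigest(),
}).encode("utf8")
req = Request(
    server + "/_matrix/client/r0/admin/register",
    data=body,
    headers={"Content-Type": "application/json"},
)
print(json.load(urlopen(req)))
```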
docs/workers.rst

@@ -74,7 +74,7 @@ replication endpoints that it's talking to on the main synapse process.
``worker_replication_port`` should point to the TCP replication listener port and
``worker_replication_http_port`` should point to the HTTP replication port.

Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
Currently, only the ``event_creator`` worker requires specifying
``worker_replication_http_port``.

For instance::

@@ -173,23 +173,10 @@ endpoints matching the following regular expressions::

    ^/_matrix/federation/v1/backfill/
    ^/_matrix/federation/v1/get_missing_events/
    ^/_matrix/federation/v1/publicRooms
    ^/_matrix/federation/v1/query/
    ^/_matrix/federation/v1/make_join/
    ^/_matrix/federation/v1/make_leave/
    ^/_matrix/federation/v1/send_join/
    ^/_matrix/federation/v1/send_leave/
    ^/_matrix/federation/v1/invite/
    ^/_matrix/federation/v1/query_auth/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/send/

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.

``synapse.app.federation_sender``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -219,10 +206,6 @@ Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$

``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~

@@ -241,14 +224,6 @@ regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload

If ``use_presence`` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status

This "stub" presence handler will pass through ``GET`` requests but make the
``PUT`` effectively a no-op.

It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration

@@ -265,7 +240,6 @@ Handles some event creation. It can handle REST endpoints matching::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.
@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
pyproject.toml

@@ -1,30 +1,5 @@
[tool.towncrier]
package = "synapse"
filename = "CHANGES.md"
filename = "CHANGES.rst"
directory = "changelog.d"
issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"

[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true

[[tool.towncrier.type]]
directory = "bugfix"
name = "Bugfixes"
showcontent = true

[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true

[[tool.towncrier.type]]
directory = "removal"
name = "Deprecations and Removals"
showcontent = true

[[tool.towncrier.type]]
directory = "misc"
name = "Internal Changes"
showcontent = true
issue_format = "`#{issue} <https://github.com/matrix-org/synapse/issues/{issue}>`_"
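The substantive changes in this hunk are the changelog filename (``CHANGES.md`` back to ``CHANGES.rst``) and the ``issue_format`` template. A quick Python check of how the two templates render issue 1234:

```python
# How the two issue_format templates above render for issue 1234.
issue = 1234
md = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})".format(issue=issue)
rst = "`#{issue} <https://github.com/matrix-org/synapse/issues/{issue}>`_".format(issue=issue)
print(md)   # [\#1234](https://github.com/matrix-org/synapse/issues/1234)
print(rst)  # `#1234 <https://github.com/matrix-org/synapse/issues/1234>`_
```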
scripts/register_new_matrix_user

@@ -26,37 +26,11 @@ import yaml


def request_registration(user, password, server_location, shared_secret, admin=False):
    req = urllib2.Request(
        "%s/_matrix/client/r0/admin/register" % (server_location,),
        headers={'Content-Type': 'application/json'}
    )

    try:
        if sys.version_info[:3] >= (2, 7, 9):
            # As of version 2.7.9, urllib2 now checks SSL certs
            import ssl
            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
        else:
            f = urllib2.urlopen(req)
        body = f.read()
        f.close()
        nonce = json.loads(body)["nonce"]
    except urllib2.HTTPError as e:
        print "ERROR! Received %d %s" % (e.code, e.reason,)
        if 400 <= e.code < 500:
            if e.info().type == "application/json":
                resp = json.load(e)
                if "error" in resp:
                    print resp["error"]
        sys.exit(1)

    mac = hmac.new(
        key=shared_secret,
        digestmod=hashlib.sha1,
    )

    mac.update(nonce)
    mac.update("\x00")
    mac.update(user)
    mac.update("\x00")
    mac.update(password)

@@ -66,10 +40,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
    mac = mac.hexdigest()

    data = {
        "nonce": nonce,
        "username": user,
        "user": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
        "admin": admin,
    }

@@ -78,7 +52,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
    print "Sending registration request..."

    req = urllib2.Request(
        "%s/_matrix/client/r0/admin/register" % (server_location,),
        "%s/_matrix/client/api/v1/register" % (server_location,),
        data=json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )
13 setup.cfg
@@ -14,17 +14,11 @@ ignore =
    pylint.cfg
    tox.ini

[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik
# doesn't like it. E203 is contrary to PEP8. E731 is silly.
ignore = W503,E203,E731

[flake8]
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90
ignore=W503,E203,E731
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
# E203 is contrary to PEP8.
ignore = W503,E203

[isort]
line_length = 89

@@ -37,4 +31,3 @@ known_compat = mock,six
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
combine_as_imports=true
synapse/__init__.py

@@ -17,4 +17,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.33.4"
__version__ = "0.32.2"
synapse/api/auth.py

@@ -25,8 +25,7 @@ from twisted.internet import defer
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.api.errors import AuthError, Codes
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches.lrucache import LruCache

@@ -66,9 +65,8 @@ class Auth(object):

    @defer.inlineCallbacks
    def check_from_context(self, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_events_ids = yield self.compute_auth_events(
            event, prev_state_ids, for_verification=True,
            event, context.prev_state_ids, for_verification=True,
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = {

@@ -212,9 +210,9 @@ class Auth(object):
            user_agent = request.requestHeaders.getRawHeaders(
                b"User-Agent",
                default=[b""]
            )[0].decode('ascii', 'surrogateescape')
            )[0]
            if user and access_token and ip_addr:
                yield self.store.insert_client_ip(
                self.store.insert_client_ip(
                    user_id=user.to_string(),
                    access_token=access_token,
                    ip=ip_addr,

@@ -253,10 +251,10 @@ class Auth(object):
            if ip_address not in app_service.ip_range_whitelist:
                defer.returnValue((None, None))

        if b"user_id" not in request.args:
        if "user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))

        user_id = request.args[b"user_id"][0].decode('utf8')
        user_id = request.args["user_id"][0]
        if app_service.sender == user_id:
            defer.returnValue((app_service.sender, app_service))

@@ -546,8 +544,7 @@ class Auth(object):

    @defer.inlineCallbacks
    def add_auth_events(self, builder, context):
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
        auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)

        auth_events_entries = yield self.store.add_event_hashes(
            auth_ids

@@ -683,7 +680,7 @@ class Auth(object):
        Returns:
            bool: False if no access_token was given, True otherwise.
        """
        query_params = request.args.get(b"access_token")
        query_params = request.args.get("access_token")
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        return bool(query_params) or bool(auth_headers)

@@ -699,7 +696,7 @@ class Auth(object):
        401 since some of the old clients depended on auth errors returning
        403.
        Returns:
            unicode: The access_token
            str: The access_token
        Raises:
            AuthError: If there isn't an access_token in the request.
        """

@@ -721,9 +718,9 @@ class Auth(object):
                "Too many Authorization headers.",
                errcode=Codes.MISSING_TOKEN,
            )
        parts = auth_headers[0].split(b" ")
        if parts[0] == b"Bearer" and len(parts) == 2:
            return parts[1].decode('ascii')
        parts = auth_headers[0].split(" ")
        if parts[0] == "Bearer" and len(parts) == 2:
            return parts[1]
        else:
            raise AuthError(
                token_not_found_http_status,

@@ -739,93 +736,4 @@ class Auth(object):
                errcode=Codes.MISSING_TOKEN
            )

        return query_params[0].decode('ascii')
        return query_params[0]

    @defer.inlineCallbacks
    def check_in_room_or_world_readable(self, room_id, user_id):
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

        Returns:
            Deferred[tuple[str, str|None]]: Resolves to the current membership of
                the user in the room and the membership event ID of the user. If
                the user is not in the room and never has been, then
                `(Membership.JOIN, None)` is returned.
        """

        try:
            # check_user_was_in_room will return the most recent membership
            # event for the user if:
            #  * The user is a non-guest user, and was ever in the room
            #  * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_was_in_room(room_id, user_id)
            defer.returnValue((member_event.membership, member_event.event_id))
        except AuthError:
            visibility = yield self.state.get_current_state(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (
                visibility and
                visibility.content["history_visibility"] == "world_readable"
            ):
                defer.returnValue((Membership.JOIN, None))
                return
            raise AuthError(
                403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
            )

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort

            threepid(dict|None): If present, checks for presence against configured
                reserved threepid. Used in cases where the user is trying to register
                with a MAU blocked server, normally they would be rejected but their
                threepid is on the reserved list. user_id and
                threepid should never be set at the same time.
        """

        # Never fail an auth check for the server notices users
        # This can be a problem where event creation is prohibited due to blocking
        if user_id == self.hs.config.server_notices_mxid:
            return

        if self.hs.config.hs_disabled:
            raise ResourceLimitError(
                403, self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self.hs.config.admin_contact,
                limit_type=self.hs.config.hs_disabled_limit_type
            )
        if self.hs.config.limit_usage_by_mau is True:
            assert not (user_id and threepid)

            # If the user is already part of the MAU cohort or a trial user
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return

                is_trial = yield self.store.is_trial_user(user_id)
                if is_trial:
                    return
            elif threepid:
                # If the user does not exist yet, but is signing up with a
                # reserved threepid then pass auth check
                if is_threepid_reserved(self.hs.config, threepid):
                    return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self.hs.config.max_mau_value:
                raise ResourceLimitError(
                    403, "Monthly Active User Limit Exceeded",
                    admin_contact=self.hs.config.admin_contact,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                    limit_type="monthly_active_user"
                )
synapse/api/constants.py

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -69,6 +68,7 @@ class EventTypes(object):

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    Encryption = "m.room.encryption"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"

@@ -78,7 +78,6 @@ class EventTypes(object):
    Name = "m.room.name"

    ServerACL = "m.room.server_acl"
    Pinned = "m.room.pinned_events"


class RejectedReason(object):

@@ -96,19 +95,3 @@ class RoomCreationPreset(object):
class ThirdPartyEntityKind(object):
    USER = "user"
    LOCATION = "location"


class RoomVersions(object):
    V1 = "1"
    VDH_TEST = "vdh-test-version"


# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1

# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}

ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -56,9 +55,6 @@ class Codes(object):
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
    CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
    RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
    UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"


class CodeMessageException(RuntimeError):
@@ -73,6 +69,20 @@ class CodeMessageException(RuntimeError):
        self.code = code
        self.msg = msg

    def error_dict(self):
        return cs_error(self.msg)


class MatrixCodeMessageException(CodeMessageException):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
        super(MatrixCodeMessageException, self).__init__(code, msg)
        self.errcode = errcode


class SynapseError(CodeMessageException):
    """A base exception type for matrix errors which have an errcode and error
@@ -98,28 +108,38 @@ class SynapseError(CodeMessageException):
            self.errcode,
        )

    @classmethod
    def from_http_response_exception(cls, err):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Args:
            err (HttpResponseException):

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(err.response)
        except ValueError:
            j = {}
        errcode = j.get('errcode', Codes.UNKNOWN)
        errmsg = j.get('error', err.msg)

        res = SynapseError(err.code, errmsg, errcode)
        return res


class ProxiedRequestError(SynapseError):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
        super(ProxiedRequestError, self).__init__(
            code, msg, errcode
        )
        if additional_fields is None:
            self._additional_fields = {}
        else:
            self._additional_fields = dict(additional_fields)

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            **self._additional_fields
        )

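Both sides of this hunk are exercised the same way at call sites: a failed outbound request is caught and converted into a client-facing matrix error. A minimal, hypothetical usage sketch under that assumption (the older branch goes via the classmethod above; the newer one calls HttpResponseException.to_synapse_error, shown further down):

    # Hypothetical call site inside an @defer.inlineCallbacks method.
    try:
        result = yield self.http_client.get_json(uri)
    except HttpResponseException as e:
        # Parse the response body for a matrix errcode/error, falling back
        # to M_UNKNOWN with the HTTP reason phrase.
        raise SynapseError.from_http_response_exception(e)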
class ConsentNotGivenError(SynapseError):
@@ -231,30 +251,6 @@ class AuthError(SynapseError):
        super(AuthError, self).__init__(*args, **kwargs)


class ResourceLimitError(SynapseError):
    """
    Any error raised when there is a problem with resource usage.
    For instance, the monthly active user limit for the server has been exceeded
    """
    def __init__(
        self, code, msg,
        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
        admin_contact=None,
        limit_type=None,
    ):
        self.admin_contact = admin_contact
        self.limit_type = limit_type
        super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            admin_contact=self.admin_contact,
            limit_type=self.limit_type
        )


class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

@@ -312,25 +308,12 @@ class LimitExceededError(SynapseError):
        )


class IncompatibleRoomVersionError(SynapseError):
    """A server is trying to join a room whose version it does not support."""

    def __init__(self, room_version):
        super(IncompatibleRoomVersionError, self).__init__(
            code=400,
            msg="Your homeserver does not support the features required to "
                "join this room",
            errcode=Codes.INCOMPATIBLE_ROOM_VERSION,
        )

        self._room_version = room_version

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            room_version=self._room_version,
        )


def cs_exception(exception):
    if isinstance(exception, CodeMessageException):
        return exception.error_dict()
    else:
        logger.error("Unknown exception type: %s", type(exception))
        return {}


def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
@@ -389,7 +372,7 @@ class HttpResponseException(CodeMessageException):
    Represents an HTTP-level failure of an outbound request

    Attributes:
        response (bytes): body of response
        response (str): body of response
    """
    def __init__(self, code, msg, response):
        """
@@ -397,39 +380,7 @@ class HttpResponseException(CodeMessageException):
        Args:
            code (int): HTTP status code
            msg (str): reason phrase from HTTP response status line
            response (bytes): body of response
            response (str): body of response
        """
        super(HttpResponseException, self).__init__(code, msg)
        self.response = response

    def to_synapse_error(self):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(self.response)
        except ValueError:
            j = {}

        if not isinstance(j, dict):
            j = {}

        errcode = j.pop('errcode', Codes.UNKNOWN)
        errmsg = j.pop('error', self.msg)

        return ProxiedRequestError(self.code, errmsg, errcode, j)

@@ -113,13 +113,7 @@ ROOM_EVENT_FILTER_SCHEMA = {
        },
        "contains_url": {
            "type": "boolean"
        },
        "lazy_load_members": {
            "type": "boolean"
        },
        "include_redundant_members": {
            "type": "boolean"
        },
    }
}
}

@@ -251,7 +245,6 @@ class FilterCollection(object):
            "include_leave", False
        )
        self.event_fields = filter_json.get("event_fields", [])
        self.event_format = filter_json.get("event_format", "client")

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -268,12 +261,6 @@ class FilterCollection(object):
    def ephemeral_limit(self):
        return self._room_ephemeral_filter.limit()

    def lazy_load_members(self):
        return self._room_state_filter.lazy_load_members()

    def include_redundant_members(self):
        return self._room_state_filter.include_redundant_members()

    def filter_presence(self, events):
        return self._presence_filter.filter(events)

@@ -430,12 +417,6 @@ class Filter(object):
    def limit(self):
        return self.filter_json.get("limit", 10)

    def lazy_load_members(self):
        return self.filter_json.get("lazy_load_members", False)

    def include_redundant_members(self):
        return self.filter_json.get("include_redundant_members", False)


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):

@@ -72,7 +72,7 @@ class Ratelimiter(object):
        return allowed, time_allowed

    def prune_message_counts(self, time_now_s):
        for user_id in list(self.message_counts.keys()):
        for user_id in self.message_counts.keys():
            message_count, time_start, msg_rate_hz = (
                self.message_counts[user_id]
            )

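The list(...) wrapper on the removed line matters on Python 3, where dict views are live and deleting entries while iterating over them raises a RuntimeError; a small illustration of the difference:

    counts = {"@a:hs": 1, "@b:hs": 0}
    for user_id in list(counts.keys()):  # snapshot, so deletion mid-loop is safe
        if counts[user_id] == 0:
            del counts[user_id]  # would raise RuntimeError on py3 without list()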
@@ -140,7 +140,7 @@ def listen_metrics(bind_addresses, port):
    logger.info("Metrics now reporting on %s:%d", host, port)


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
def listen_tcp(bind_addresses, port, factory, backlog=50):
    """
    Create a TCP socket for a port and several addresses
    """
@@ -156,9 +156,7 @@ def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    check_bind_error(e, address, bind_addresses)


def listen_ssl(
    bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
    """
    Create an SSL socket for a port and several addresses
    """

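The reactor=reactor keyword dropped from both signatures presumably exists so callers (tests in particular) can inject a substitute reactor instead of the global one; a minimal sketch of that pattern under the same assumption:

    from twisted.internet import reactor as _default_reactor

    def listen_tcp(bind_addresses, port, factory, reactor=_default_reactor, backlog=50):
        # Production callers use the global reactor; tests can pass e.g. a
        # MemoryReactor from Twisted's test helpers instead.
        for address in bind_addresses:
            reactor.listenTCP(port, factory, backlog, address)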
@@ -51,7 +51,10 @@ class AppserviceSlaveStore(


class AppserviceServer(HomeServer):
    DATASTORE_CLASS = AppserviceSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -114,9 +117,8 @@ class ASReplicationHandler(ReplicationClientHandler):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()

@@ -31,7 +31,6 @@ from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -39,15 +38,9 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,
    RoomEventContextServlet,
    RoomMemberListRestServlet,
    RoomStateRestServlet,
)
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -59,14 +52,13 @@ logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedAccountDataStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    TransactionStore,
    SlavedClientIpStore,
    BaseSlavedStore,
):
@@ -74,7 +66,10 @@ class ClientReaderSlavedStore(


class ClientReaderServer(HomeServer):
    DATASTORE_CLASS = ClientReaderSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -87,13 +82,7 @@ class ClientReaderServer(HomeServer):
            resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
        elif name == "client":
            resource = JsonResource(self, canonical_json=False)

            PublicRoomListRestServlet(self).register(resource)
            RoomMemberListRestServlet(self).register(resource)
            JoinedRoomMemberListRestServlet(self).register(resource)
            RoomStateRestServlet(self).register(resource)
            RoomEventContextServlet(self).register(resource)

            resources.update({
                "/_matrix/client/r0": resource,
                "/_matrix/client/unstable": resource,
@@ -165,13 +154,11 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -43,13 +43,8 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.profile import (
    ProfileAvatarURLRestServlet,
    ProfileDisplaynameRestServlet,
    ProfileRestServlet,
)
from synapse.rest.client.v1.room import (
    JoinRoomAliasServlet,
    RoomMembershipRestServlet,
@@ -58,7 +53,6 @@ from synapse.rest.client.v1.room import (
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
@@ -68,11 +62,8 @@ logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
    UserDirectoryStore,
    DirectoryStore,
    SlavedTransactionStore,
    TransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
@@ -90,7 +81,10 @@ class EventCreatorSlavedStore(


class EventCreatorServer(HomeServer):
    DATASTORE_CLASS = EventCreatorSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -107,9 +101,6 @@ class EventCreatorServer(HomeServer):
            RoomMembershipRestServlet(self).register(resource)
            RoomStateEventRestServlet(self).register(resource)
            JoinRoomAliasServlet(self).register(resource)
            ProfileAvatarURLRestServlet(self).register(resource)
            ProfileDisplaynameRestServlet(self).register(resource)
            ProfileRestServlet(self).register(resource)
            resources.update({
                "/_matrix/client/r0": resource,
                "/_matrix/client/unstable": resource,
@@ -183,13 +174,11 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -32,17 +32,11 @@ from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -55,24 +49,21 @@ logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedAccountDataStore,
    SlavedProfileStore,
    SlavedApplicationServiceStore,
    SlavedPusherStore,
    SlavedPushRuleStore,
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,
    TransactionStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    DATASTORE_CLASS = FederationReaderSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -152,13 +143,11 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -36,11 +36,11 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async_helpers import Linearizer
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
@@ -50,7 +50,7 @@ logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, SlavedTransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
@@ -78,7 +78,10 @@ class FederationSenderSlaveStore(


class FederationSenderServer(HomeServer):
    DATASTORE_CLASS = FederationSenderSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -141,9 +144,8 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(FederationSenderReplicationHandler, self).on_rdata(
        super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)
@@ -184,13 +186,11 @@ def start(config_options):
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -38,7 +38,6 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -50,35 +49,6 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.frontend_proxy")


class PresenceStatusStubServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super(PresenceStatusStubServlet, self).__init__(hs)
        self.http_client = hs.get_simple_http_client()
        self.auth = hs.get_auth()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
        # Pass through the auth headers, if any, in case the access token
        # is there.
        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
        headers = {
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
            self.main_uri + request.uri,
            headers=headers,
        )
        defer.returnValue((200, result))

    @defer.inlineCallbacks
    def on_PUT(self, request, user_id):
        yield self.auth.get_user_by_req(request)
        defer.returnValue((200, {}))


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

@@ -148,7 +118,10 @@ class FrontendProxySlavedStore(


class FrontendProxyServer(HomeServer):
    DATASTORE_CLASS = FrontendProxySlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -162,12 +135,6 @@ class FrontendProxyServer(HomeServer):
        elif name == "client":
            resource = JsonResource(self, canonical_json=False)
            KeyUploadServlet(self).register(resource)

            # If presence is disabled, use the stub servlet that does
            # not allow sending presence
            if not self.config.use_presence:
                PresenceStatusStubServlet(self).register(resource)

            resources.update({
                "/_matrix/client/r0": resource,
                "/_matrix/client/unstable": resource,
@@ -186,8 +153,7 @@ class FrontendProxyServer(HomeServer):
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor()
        )
    )

        logger.info("Synapse client reader now listening on port %d", port)
@@ -242,13 +208,11 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -18,10 +18,6 @@ import logging
import os
import sys

from six import iteritems

from prometheus_client import Gauge

from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
@@ -51,7 +47,6 @@ from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.module_api import ModuleApi
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
@@ -62,7 +57,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
from synapse.storage import DataStore, are_all_users_on_domain
from synapse.storage import are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -111,8 +106,6 @@ def build_resource_for_web_client(hs):


class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore

    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
@@ -304,15 +297,6 @@ class SynapseHomeServer(HomeServer):
    quit_with_error(e.message)


# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
    "synapse_admin_mau:registered_reserved_users",
    "Registered users with reserved threepids"
)


def setup(config_options):
    """
    Args:
@@ -344,7 +328,6 @@ def setup(config_options):
    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
@@ -353,7 +336,6 @@ def setup(config_options):
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
@@ -362,13 +344,13 @@ def setup(config_options):
    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)
        hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
        db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
@@ -443,9 +425,6 @@ def run(hs):
    # currently either 0 or 1
    stats_process = []

    def start_phone_stats_home():
        return run_as_background_process("phone_stats_home", phone_stats_home)

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
@@ -463,7 +442,7 @@ def run(hs):
        stats["total_nonbridged_users"] = total_nonbridged_users

        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
        for name, count in iteritems(daily_user_type_results):
        for name, count in daily_user_type_results.iteritems():
            stats["daily_user_type_" + name] = count

        room_count = yield hs.get_datastore().get_room_count()
@@ -474,7 +453,7 @@ def run(hs):
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        r30_results = yield hs.get_datastore().count_r30_users()
        for name, count in iteritems(r30_results):
        for name, count in r30_results.iteritems():
            stats["r30_users_" + name] = count

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
@@ -517,45 +496,16 @@ def run(hs):
        )

    def generate_user_daily_visit_stats():
        return run_as_background_process(
            "generate_user_daily_visits",
            hs.get_datastore().generate_user_daily_visits,
        )
        hs.get_datastore().generate_user_daily_visits()

    # Rather than update on per session basis, batch up the requests.
    # If you increase the loop period, the accuracy of user_daily_visits
    # table will decrease
    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)

    # monthly active user limiting functionality
    clock.looping_call(
        hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
    )
    hs.get_datastore().reap_monthly_active_users()

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        current_mau_count = 0
        reserved_count = 0
        store = hs.get_datastore()
        if hs.config.limit_usage_by_mau:
            current_mau_count = yield store.get_monthly_active_count()
            reserved_count = yield store.get_registered_reserved_users_count()
        current_mau_gauge.set(float(current_mau_count))
        registered_reserved_users_mau_gauge.set(float(reserved_count))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    hs.get_datastore().initialise_reserved_users(
        hs.config.mau_limits_reserved_threepids
    )
    generate_monthly_active_users()
    if hs.config.limit_usage_by_mau:
        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

    # We need to defer this init for the cases that we daemonize
    # otherwise the process ID we get is that of the non-daemon process
@@ -563,7 +513,7 @@ def run(hs):

    # We wait 5 minutes to send the first set of stats as the server can
    # be quite busy the first few minutes
    clock.call_later(5 * 60, start_phone_stats_home)
    clock.call_later(5 * 60, phone_stats_home)

    if hs.config.daemonize and hs.config.print_pidfile:
        print (hs.config.pid_file)

@@ -34,7 +34,7 @@ from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
@@ -52,7 +52,7 @@ class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    SlavedTransactionStore,
    TransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
):
@@ -60,7 +60,10 @@ class MediaRepositorySlavedStore(


class MediaRepositoryServer(HomeServer):
    DATASTORE_CLASS = MediaRepositorySlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -152,13 +155,11 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -78,7 +78,10 @@ class PusherSlaveStore(


class PusherServer(HomeServer):
    DATASTORE_CLASS = PusherSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
@@ -145,9 +148,8 @@ class PusherReplicationHandler(ReplicationClientHandler):

        self.pusher_pool = hs.get_pusherpool()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.poke_pushers, stream_name, token, rows)

    @defer.inlineCallbacks
@@ -160,11 +162,11 @@ class PusherReplicationHandler(ReplicationClientHandler):
            else:
                yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
        elif stream_name == "events":
            self.pusher_pool.on_new_notifications(
            yield self.pusher_pool.on_new_notifications(
                token, token,
            )
        elif stream_name == "receipts":
            self.pusher_pool.on_new_receipts(
            yield self.pusher_pool.on_new_receipts(
                token, token, set(row.room_id for row in rows)
            )
        except Exception:

@@ -55,6 +55,7 @@ from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
@@ -80,7 +81,9 @@ class SynchrotronSlavedStore(
    RoomStore,
    BaseSlavedStore,
):
    pass
    did_forget = (
        RoomMemberStore.__dict__["did_forget"]
    )


UPDATE_SYNCING_USERS_MS = 10 * 1000
@@ -114,10 +117,7 @@ class SynchrotronPresence(object):
        logger.info("Presence process_id is %r", self.process_id)

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        if self.hs.config.use_presence:
            self.hs.get_tcp_replication().send_user_sync(
                user_id, is_syncing, last_sync_ms
            )
        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
@@ -214,13 +214,10 @@ class SynchrotronPresence(object):
        yield self.notify_from_replication(states, stream_id)

    def get_currently_syncing_users(self):
        if self.hs.config.use_presence:
            return [
                user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
                if count > 0
            ]
        else:
            return set()
        return [
            user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
            if count > 0
        ]


class SynchrotronTyping(object):
@@ -249,7 +246,10 @@ class SynchrotronApplicationService(object):


class SynchrotronServer(HomeServer):
    DATASTORE_CLASS = SynchrotronSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -335,9 +335,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
        self.presence_handler = hs.get_presence_handler()
        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.process_and_notify, stream_name, token, rows)

    def get_streams_to_replicate(self):

@@ -25,8 +25,6 @@ import subprocess
import sys
import time

from six import iteritems

import yaml

SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
@@ -175,7 +173,7 @@ def main():
        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

    cache_factors = config.get("synctl_cache_factors", {})
    for cache_name, factor in iteritems(cache_factors):
    for cache_name, factor in cache_factors.iteritems():
        os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)

    worker_configfiles = []

@@ -94,7 +94,10 @@ class UserDirectorySlaveStore(


class UserDirectoryServer(HomeServer):
    DATASTORE_CLASS = UserDirectorySlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -166,9 +169,8 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        yield super(UserDirectoryReplicationHandler, self).on_rdata(
        super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == "current_state_deltas":
@@ -212,13 +214,11 @@ def start(config_options):
    config.update_user_directory = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = UserDirectoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -13,8 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from six.moves import urllib
import urllib

from prometheus_client import Counter

@@ -99,7 +98,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    def query_user(self, service, user_id):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
            response = yield self.get_json(uri, {
@@ -120,7 +119,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    def query_alias(self, service, alias):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {
@@ -154,7 +153,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            service.url,
            APP_SERVICE_PREFIX,
            kind,
            urllib.parse.quote(protocol)
            urllib.quote(protocol)
        )
        try:
            response = yield self.get_json(uri, fields)
@@ -189,7 +188,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        uri = "%s%s/thirdparty/protocol/%s" % (
            service.url,
            APP_SERVICE_PREFIX,
            urllib.parse.quote(protocol)
            urllib.quote(protocol)
        )
        try:
            info = yield self.get_json(uri, {})
@@ -229,7 +228,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        txn_id = str(txn_id)

        uri = service.url + ("/transactions/%s" %
                             urllib.parse.quote(txn_id))
                             urllib.quote(txn_id))
        try:
            yield self.put_json(
                uri=uri,

@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .groups import GroupsConfig
from .jwt_config import JWTConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig

@@ -168,8 +168,7 @@ def setup_logging(config, use_worker_options=False):
    if log_file:
        # TODO: Customisable file size / backup count
        handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
            encoding='utf8'
            log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
        )

    def sighup(signum, stack):
@@ -194,8 +193,9 @@ def setup_logging(config, use_worker_options=False):

    def sighup(signum, stack):
        # it might be better to use a file watcher or something for this.
        logging.info("Reloading log config from %s due to SIGHUP",
                     log_config)
        load_log_config()
        logging.info("Reloaded log config from %s due to SIGHUP", log_config)

    load_log_config()


@@ -49,9 +49,6 @@ class ServerConfig(Config):
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)
@@ -70,31 +67,6 @@ class ServerConfig(Config):
            "block_non_admin_invites", False,
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get(
                "max_mau_value", 0,
            )

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get(
            "mau_trial_days", 0,
        )

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get(
@@ -237,8 +209,6 @@ class ServerConfig(Config):
        # different cores. See
        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
        #
        # This setting requires the affinity package to be installed!
        #
        # cpu_affinity: 0xFFFFFFFF

        # Whether to serve a web client from the HTTP/HTTPS root resource.
@@ -258,9 +228,6 @@ class ServerConfig(Config):
        # hard limit.
        soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        use_presence: true

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        # gc_thresholds: [700, 10, 10]

@@ -352,33 +319,6 @@ class ServerConfig(Config):
        # - port: 9000
        #   bind_addresses: ['::1', '127.0.0.1']
        #   type: manhole


        # Homeserver blocking
        #
        # How to reach the server admin, used in ResourceLimitError
        # admin_contact: 'mailto:admin@server.com'
        #
        # Global block config
        #
        # hs_disabled: False
        # hs_disabled_message: 'Human readable reason for why the HS is blocked'
        # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
        #
        # Monthly Active User Blocking
        #
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
        # mau_trial_days: 2
        #
        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        # mau_limit_reserved_threepids:
        # - medium: 'email'
        #   address: 'reserved_user@example.com'

        """ % locals()

    def read_arguments(self, args):
@@ -404,23 +344,6 @@ class ServerConfig(Config):
                         " service on the given port.")


def is_threepid_reserved(config, threepid):
    """Check the threepid against the reserved threepid config
    Args:
        config(ServerConfig) - to access server config attributes
        threepid(dict) - The threepid to test for

    Returns:
        boolean Is the threepid undertest reserved_user
    """

    for tp in config.mau_limits_reserved_threepids:
        if (threepid['medium'] == tp['medium']
                and threepid['address'] == tp['address']):
            return True
    return False


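For reference, the threepid argument is expected to be a dict with medium and address keys matching the mau_limit_reserved_threepids entries shown in the sample config above; a hypothetical check:

    config.mau_limits_reserved_threepids = [
        {"medium": "email", "address": "reserved_user@example.com"},
    ]
    threepid = {"medium": "email", "address": "reserved_user@example.com"}
    assert is_threepid_reserved(config, threepid)  # True: medium and address match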
def read_gc_thresholds(thresholds):
    """Reads the three integer thresholds for garbage collection. Ensures that
    the thresholds are integers if thresholds are supplied.

46 synapse/config/stats.py Normal file
@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

import sys


class StatsConfig(Config):
    """Stats Configuration
    Configuration for the behaviour of synapse's stats engine
    """

    def read_config(self, config):
        self.stats_enable = False
        self.stats_bucket_size = 86400
        self.stats_retention = sys.maxint
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enable = stats_config.get("enable", self.stats_enable)
            self.stats_bucket_size = stats_config.get(
                "bucket_size", self.stats_bucket_size
            )
            self.stats_retention = stats_config.get("retention", self.stats_retention)

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Stats configuration
        #
        # stats:
        #   enable: false
        #   bucket_size: 86400 # 1 day
        #   retention: 31536000 # 1 year
        """
@@ -30,10 +30,10 @@ class VoipConfig(Config):
        ## Turn ##

        # The public URIs of the TURN server to give to clients
        #turn_uris: []
        turn_uris: []

        # The shared secret used to compute passwords for the TURN server
        #turn_shared_secret: "YOUR_SHARED_SECRET"
        turn_shared_secret: "YOUR_SHARED_SECRET"

        # The Username and password if the TURN server needs them and
        # does not use a token

@@ -11,22 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from zope.interface import implementer

from OpenSSL import SSL, crypto
from twisted.internet import ssl
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure

logger = logging.getLogger(__name__)


class ServerContextFactory(ContextFactory):
class ServerContextFactory(ssl.ContextFactory):
    """Factory for PyOpenSSL SSL contexts that are used to handle incoming
    connections."""
    connections and to make connections to remote servers."""

    def __init__(self, config):
        self._context = SSL.Context(SSL.SSLv23_METHOD)
@@ -51,78 +48,3 @@ class ServerContextFactory(ContextFactory):

    def getContext(self):
        return self._context


def _idnaBytes(text):
    """
    Convert some text typed by a human into some ASCII bytes. This is a
    copy of twisted.internet._idna._idnaBytes. For documentation, see the
    twisted documentation.
    """
    try:
        import idna
    except ImportError:
        return text.encode("idna")
    else:
        return idna.encode(text)


def _tolerateErrors(wrapped):
    """
    Wrap up an info_callback for pyOpenSSL so that if something goes wrong
    the error is immediately logged and the connection is dropped if possible.
    This is a copy of twisted.internet._sslverify._tolerateErrors. For
    documentation, see the twisted documentation.
    """

    def infoCallback(connection, where, ret):
        try:
            return wrapped(connection, where, ret)
        except:  # noqa: E722, taken from the twisted implementation
            f = Failure()
            logger.exception("Error during info_callback")
            connection.get_app_data().failVerification(f)

    return infoCallback


@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
    """
    Client creator for TLS without certificate identity verification. This is a
    copy of twisted.internet._sslverify.ClientTLSOptions with the identity
    verification left out. For documentation, see the twisted documentation.
    """

    def __init__(self, hostname, ctx):
        self._ctx = ctx
        self._hostname = hostname
        self._hostnameBytes = _idnaBytes(hostname)
        ctx.set_info_callback(
            _tolerateErrors(self._identityVerifyingInfoCallback)
        )

    def clientConnectionForTLS(self, tlsProtocol):
        context = self._ctx
        connection = SSL.Connection(context, None)
        connection.set_app_data(tlsProtocol)
        return connection

    def _identityVerifyingInfoCallback(self, connection, where, ret):
        if where & SSL.SSL_CB_HANDSHAKE_START:
            connection.set_tlsext_host_name(self._hostnameBytes)


class ClientTLSOptionsFactory(object):
    """Factory for Twisted ClientTLSOptions that are used to make connections
    to remote servers for federation."""

    def __init__(self, config):
        # We don't use config options yet
        pass

    def get_options(self, host):
        return ClientTLSOptions(
            host,
            CertificateOptions(verify=False).getContext()
        )

@@ -18,9 +18,7 @@ import logging
from canonicaljson import json

from twisted.internet import defer, reactor
from twisted.internet.error import ConnectError
from twisted.internet.protocol import Factory
from twisted.names.error import DomainError
from twisted.web.http import HTTPClient

from synapse.http.endpoint import matrix_federation_endpoint
@@ -32,14 +30,14 @@ KEY_API_V1 = b"/_matrix/key/v1/"


@defer.inlineCallbacks
def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
    """Fetch the keys for a remote server."""

    factory = SynapseKeyClientFactory()
    factory.path = path
    factory.host = server_name
    endpoint = matrix_federation_endpoint(
        reactor, server_name, tls_client_options_factory, timeout=30
        reactor, server_name, ssl_context_factory, timeout=30
    )

    for i in range(5):
@@ -49,14 +47,12 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
            server_response, server_certificate = yield protocol.remote_key
            defer.returnValue((server_response, server_certificate))
        except SynapseKeyClientError as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
            if e.status.startswith(b"4"):
            logger.exception("Error getting key for %r" % (server_name,))
            if e.status.startswith("4"):
                # Don't retry for 4xx responses.
                raise IOError("Cannot get key for %r" % server_name)
        except (ConnectError, DomainError) as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
        except Exception as e:
            logger.exception("Error getting key for %r", server_name)
            logger.exception(e)
    raise IOError("Cannot get key for %r" % server_name)


@@ -82,12 +78,6 @@ class SynapseKeyClientProtocol(HTTPClient):
        self._peer = self.transport.getPeer()
        logger.debug("Connected to %s", self._peer)

        if not isinstance(self.path, bytes):
            self.path = self.path.encode('ascii')

        if not isinstance(self.host, bytes):
            self.host = self.host.encode('ascii')

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)

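Stripped of the Twisted machinery, fetch_server_key's control flow is a bounded retry loop that gives up early on 4xx. A sketch under those assumptions (fetch and KeyFetchError are stand-ins, not Synapse names):

class KeyFetchError(Exception):
    def __init__(self, status):
        self.status = status

def fetch_with_retries(server_name, fetch, attempts=5):
    for _ in range(attempts):
        try:
            return fetch(server_name)
        except KeyFetchError as e:
            # A 4xx status means the server understood the request and
            # refused it; retrying cannot help, so fail immediately.
            if str(e.status).startswith("4"):
                raise IOError("Cannot get key for %r" % server_name)
        except Exception:
            pass  # transient failure: fall through and retry
    raise IOError("Cannot get key for %r" % server_name)
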
@@ -16,10 +16,9 @@

import hashlib
import logging
import urllib
from collections import namedtuple

from six.moves import urllib

from signedjson.key import (
    decode_verify_key_bytes,
    encode_verify_key_base64,
@@ -41,7 +40,6 @@ from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
    LoggingContext,
    PreserveLoggingContext,
    preserve_fn,
    run_in_background,
@@ -218,34 +216,23 @@ class Keyring(object):
        servers have completed. Follows the synapse rules of logcontext
        preservation.
        """
        loop_count = 1
        while True:
            wait_on = [
                (server_name, self.key_downloads[server_name])
                self.key_downloads[server_name]
                for server_name in server_names
                if server_name in self.key_downloads
            ]
            if not wait_on:
            if wait_on:
                with PreserveLoggingContext():
                    yield defer.DeferredList(wait_on)
            else:
                break
            logger.info(
                "Waiting for existing lookups for %s to complete [loop %i]",
                [w[0] for w in wait_on], loop_count,
            )
            with PreserveLoggingContext():
                yield defer.DeferredList((w[1] for w in wait_on))

            loop_count += 1

        ctx = LoggingContext.current_context()

        def rm(r, server_name_):
            with PreserveLoggingContext(ctx):
                logger.debug("Releasing key lookup lock on %s", server_name_)
                self.key_downloads.pop(server_name_, None)
            self.key_downloads.pop(server_name_, None)
            return r

        for server_name, deferred in server_to_deferred.items():
            logger.debug("Got key lookup lock on %s", server_name)
            self.key_downloads[server_name] = deferred
            deferred.addBoth(rm, server_name)
@@ -445,7 +432,7 @@ class Keyring(object):
            # an incoming request.
            query_response = yield self.client.post_json(
                destination=perspective_name,
                path="/_matrix/key/v2/query",
                path=b"/_matrix/key/v2/query",
                data={
                    u"server_keys": {
                        server_name: {
@@ -525,9 +512,9 @@ class Keyring(object):
                continue

            (response, tls_certificate) = yield fetch_server_key(
                server_name, self.hs.tls_client_options_factory,
                path=("/_matrix/key/v2/server/%s" % (
                    urllib.parse.quote(requested_key_id),
                server_name, self.hs.tls_server_context_factory,
                path=(b"/_matrix/key/v2/server/%s" % (
                    urllib.quote(requested_key_id),
                )).encode("ascii"),
            )

@@ -668,7 +655,7 @@ class Keyring(object):
        # Try to fetch the key from the remote server.

        (response, tls_certificate) = yield fetch_server_key(
            server_name, self.hs.tls_client_options_factory
            server_name, self.hs.tls_server_context_factory
        )

        # Check the response.

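The key_downloads dict above acts as a per-server lock: the first caller records its Deferred, later callers wait on it, and an addBoth callback releases the entry whether the lookup succeeds or fails. A minimal Twisted sketch of the same pattern (logcontext handling omitted; names are illustrative):

from twisted.internet import defer

key_downloads = {}

def lookup_once(server_name, do_lookup):
    if server_name in key_downloads:
        return key_downloads[server_name]  # join the in-flight lookup
    d = defer.maybeDeferred(do_lookup, server_name)
    key_downloads[server_name] = d

    def release(result):
        key_downloads.pop(server_name, None)  # drop the "lock" either way
        return result

    d.addBoth(release)
    return d
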
@@ -20,7 +20,7 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64

from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.types import UserID, get_domain_from_id

@@ -83,14 +83,6 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
                403,
                "Creation event's room_id domain does not match sender's"
            )

        room_version = event.content.get("room_version", "1")
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise AuthError(
                403,
                "room appears to have unsupported version %s" % (
                    room_version,
                ))
        # FIXME
        logger.debug("Allowing! %s", event)
        return

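The removed hunk rejects m.room.create events that declare an unknown room version. In isolation the check is just this (sketch; KNOWN_ROOM_VERSIONS and AuthError are simplified stand-ins):

KNOWN_ROOM_VERSIONS = {"1"}  # assumption: only v1 existed at this point

class AuthError(Exception):
    def __init__(self, code, msg):
        super(AuthError, self).__init__(msg)
        self.code = code

def check_room_version(create_event_content):
    room_version = create_event_content.get("room_version", "1")
    if room_version not in KNOWN_ROOM_VERSIONS:
        raise AuthError(
            403,
            "room appears to have unsupported version %s" % (room_version,),
        )
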
@@ -13,18 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from six import iteritems

from frozendict import frozendict

from twisted.internet import defer

from synapse.util.logcontext import make_deferred_yieldable, run_in_background


class EventContext(object):
    """
    Attributes:
        current_state_ids (dict[(str, str), str]):
            The current state map including the current event.
            (type, state_key) -> event_id

        prev_state_ids (dict[(str, str), str]):
            The current state map excluding the current event.
            (type, state_key) -> event_id

        state_group (int|None): state group id, if the state has been stored
            as a state group. This is usually only None if e.g. the event is
            an outlier.
@@ -41,77 +45,38 @@ class EventContext(object):

        prev_state_events (?): XXX: is this ever set to anything other than
            the empty list?

        _current_state_ids (dict[(str, str), str]|None):
            The current state map including the current event. None if outlier
            or we haven't fetched the state from DB yet.
            (type, state_key) -> event_id

        _prev_state_ids (dict[(str, str), str]|None):
            The current state map excluding the current event. None if outlier
            or we haven't fetched the state from DB yet.
            (type, state_key) -> event_id

        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
            been calculated. None if we haven't started calculating yet

        _event_type (str): The type of the event the context is associated with.
            Only set when state has not been fetched yet.

        _event_state_key (str|None): The state_key of the event the context is
            associated with. Only set when state has not been fetched yet.

        _prev_state_id (str|None): If the event associated with the context is
            a state event, then `_prev_state_id` is the event_id of the state
            that was replaced.
            Only set when state has not been fetched yet.
    """

    __slots__ = [
        "current_state_ids",
        "prev_state_ids",
        "state_group",
        "rejected",
        "prev_group",
        "delta_ids",
        "prev_state_events",
        "app_service",
        "_current_state_ids",
        "_prev_state_ids",
        "_prev_state_id",
        "_event_type",
        "_event_state_key",
        "_fetching_state_deferred",
    ]

    def __init__(self):
        self.prev_state_events = []
        self.rejected = False
        self.app_service = None

    @staticmethod
    def with_state(state_group, current_state_ids, prev_state_ids,
                   prev_group=None, delta_ids=None):
        context = EventContext()

        # The current state including the current event
        context._current_state_ids = current_state_ids
        self.current_state_ids = None
        # The current state excluding the current event
        context._prev_state_ids = prev_state_ids
        context.state_group = state_group
        self.prev_state_ids = None
        self.state_group = None

        context._prev_state_id = None
        context._event_type = None
        context._event_state_key = None
        context._fetching_state_deferred = defer.succeed(None)
        self.rejected = False

        # A previously persisted state group and a delta between that
        # and this state.
        context.prev_group = prev_group
        context.delta_ids = delta_ids
        self.prev_group = None
        self.delta_ids = None

        return context
        self.prev_state_events = None

    @defer.inlineCallbacks
    def serialize(self, event, store):
        self.app_service = None

    def serialize(self, event):
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`

@@ -127,12 +92,11 @@ class EventContext(object)
        # the prev_state_ids, so if we're a state event we include the event
        # id that we replaced in the state.
        if event.is_state():
            prev_state_ids = yield self.get_prev_state_ids(store)
            prev_state_id = prev_state_ids.get((event.type, event.state_key))
            prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
        else:
            prev_state_id = None

        defer.returnValue({
        return {
            "prev_state_id": prev_state_id,
            "event_type": event.type,
            "event_state_key": event.state_key if event.is_state() else None,
@@ -142,9 +106,10 @@
            "delta_ids": _encode_state_dict(self.delta_ids),
            "prev_state_events": self.prev_state_events,
            "app_service_id": self.app_service.id if self.app_service else None
        })
        }

    @staticmethod
    @defer.inlineCallbacks
    def deserialize(store, input):
        """Converts a dict that was produced by `serialize` back into an
        EventContext.
@@ -157,115 +122,32 @@
            EventContext
        """
        context = EventContext()
        context.state_group = input["state_group"]
        context.rejected = input["rejected"]
        context.prev_group = input["prev_group"]
        context.delta_ids = _decode_state_dict(input["delta_ids"])
        context.prev_state_events = input["prev_state_events"]

        # We use the state_group and prev_state_id stuff to pull the
        # current_state_ids out of the DB and construct prev_state_ids.
        context._prev_state_id = input["prev_state_id"]
        context._event_type = input["event_type"]
        context._event_state_key = input["event_state_key"]
        prev_state_id = input["prev_state_id"]
        event_type = input["event_type"]
        event_state_key = input["event_state_key"]

        context._current_state_ids = None
        context._prev_state_ids = None
        context._fetching_state_deferred = None

        context.state_group = input["state_group"]
        context.prev_group = input["prev_group"]
        context.delta_ids = _decode_state_dict(input["delta_ids"])

        context.rejected = input["rejected"]
        context.prev_state_events = input["prev_state_events"]
        context.current_state_ids = yield store.get_state_ids_for_group(
            context.state_group,
        )
        if prev_state_id and event_state_key:
            context.prev_state_ids = dict(context.current_state_ids)
            context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
        else:
            context.prev_state_ids = context.current_state_ids

        app_service_id = input["app_service_id"]
        if app_service_id:
            context.app_service = store.get_app_service_by_id(app_service_id)

        return context

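Note why serialize only records prev_state_id, event_type and event_state_key rather than the whole map: prev_state_ids differs from current_state_ids in at most one key, the (type, state_key) of the event itself. A sketch of that reconstruction (plain function, illustrative names):

def rebuild_prev_state_ids(current_state_ids, event_type, event_state_key,
                           prev_state_id):
    """current_state_ids maps (type, state_key) -> event_id."""
    if prev_state_id and event_state_key is not None:
        prev = dict(current_state_ids)
        prev[(event_type, event_state_key)] = prev_state_id
        return prev
    return current_state_ids  # not a state event: the two maps are identical
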
    @defer.inlineCallbacks
    def get_current_state_ids(self, store):
        """Gets the current state IDs

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
                is None, which happens when the associated event is an outlier.
        """

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store,
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        defer.returnValue(self._current_state_ids)

    @defer.inlineCallbacks
    def get_prev_state_ids(self, store):
        """Gets the prev state IDs

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
                is None, which happens when the associated event is an outlier.
        """

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store,
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        defer.returnValue(self._prev_state_ids)

    def get_cached_current_state_ids(self):
        """Gets the current state IDs if we have them already cached.

        Returns:
            dict[(str, str), str]|None: Returns None if we haven't cached the
                state or if state_group is None, which happens when the associated
                event is an outlier.
        """

        return self._current_state_ids

    @defer.inlineCallbacks
    def _fill_out_state(self, store):
        """Called to populate the _current_state_ids and _prev_state_ids
        attributes by loading from the database.
        """
        if self.state_group is None:
            return

        self._current_state_ids = yield store.get_state_ids_for_group(
            self.state_group,
        )
        if self._prev_state_id and self._event_state_key is not None:
            self._prev_state_ids = dict(self._current_state_ids)

            key = (self._event_type, self._event_state_key)
            self._prev_state_ids[key] = self._prev_state_id
        else:
            self._prev_state_ids = self._current_state_ids

    @defer.inlineCallbacks
    def update_state(self, state_group, prev_state_ids, current_state_ids,
                     prev_group, delta_ids):
        """Replace the state in the context
        """

        # We need to make sure we wait for any ongoing fetching of state
        # to complete so that the updated state doesn't get clobbered
        if self._fetching_state_deferred:
            yield make_deferred_yieldable(self._fetching_state_deferred)

        self.state_group = state_group
        self._prev_state_ids = prev_state_ids
        self.prev_group = prev_group
        self._current_state_ids = current_state_ids
        self.delta_ids = delta_ids

        # We need to ensure that we've marked as having fetched the state
        self._fetching_state_deferred = defer.succeed(None)
        defer.returnValue(context)


def _encode_state_dict(state_dict):
@@ -277,7 +159,7 @@ def _encode_state_dict(state_dict):

    return [
        (etype, state_key, v)
        for (etype, state_key), v in iteritems(state_dict)
        for (etype, state_key), v in state_dict.iteritems()
    ]

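The getters above share one pattern: memoise a single in-flight Deferred, have every caller wait on it, then read the result off an attribute so no caller consumes another's result. A standalone Twisted sketch of that pattern (logcontext handling omitted; LazyState is illustrative, not the Synapse class):

from twisted.internet import defer

class LazyState(object):
    def __init__(self, load):
        self._load = load      # callable returning a Deferred
        self._fetching = None  # memoised fetch, shared by all callers
        self._state = None

    @defer.inlineCallbacks
    def get_state(self):
        if self._fetching is None:
            self._fetching = self._fill_out()
        yield self._fetching            # wait, but ignore the yielded value
        defer.returnValue(self._state)  # read from the attribute instead

    @defer.inlineCallbacks
    def _fill_out(self):
        self._state = yield self._load()
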
@@ -13,20 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple

import six

from twisted.internet import defer
from twisted.internet.defer import DeferredList

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.types import get_domain_from_id
from synapse.util import logcontext, unwrapFirstError

logger = logging.getLogger(__name__)
@@ -136,45 +133,34 @@ class FederationBase(object):
              * throws a SynapseError if the signature check failed.
            The deferreds run their callbacks in the sentinel logcontext.
        """
        deferreds = _check_sigs_on_pdus(self.keyring, pdus)

        redacted_pdus = [
            prune_event(pdu)
            for pdu in pdus
        ]

        deferreds = self.keyring.verify_json_objects_for_server([
            (p.origin, p.get_pdu_json())
            for p in redacted_pdus
        ])

        ctx = logcontext.LoggingContext.current_context()

        def callback(_, pdu):
        def callback(_, pdu, redacted):
            with logcontext.PreserveLoggingContext(ctx):
                if not check_event_content_hash(pdu):
                    # let's try to distinguish between failures because the event was
                    # redacted (which are somewhat expected) vs actual ball-tampering
                    # incidents.
                    #
                    # This is just a heuristic, so we just assume that if the keys are
                    # about the same between the redacted and received events, then the
                    # received event was probably a redacted copy (but we then use our
                    # *actual* redacted copy to be on the safe side.)
                    redacted_event = prune_event(pdu)
                    if (
                        set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and
                        set(six.iterkeys(redacted_event.content))
                        == set(six.iterkeys(pdu.content))
                    ):
                        logger.info(
                            "Event %s seems to have been redacted; using our redacted "
                            "copy",
                            pdu.event_id,
                        )
                    else:
                        logger.warning(
                            "Event %s content has been tampered, redacting",
                            pdu.event_id, pdu.get_pdu_json(),
                        )
                    return redacted_event
                    logger.warn(
                        "Event content has been tampered, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return redacted

                if self.spam_checker.check_event_for_spam(pdu):
                    logger.warn(
                        "Event contains spam, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return prune_event(pdu)
                    return redacted

                return pdu

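The tampering heuristic above can be stated on its own: if the received event has the same top-level and content key sets as our locally-computed redacted copy, it was probably legitimately redacted rather than tampered with. A sketch with plain dicts standing in for events:

def looks_like_redacted_copy(received, redacted):
    return (
        set(redacted.keys()) == set(received.keys())
        and set(redacted["content"].keys()) == set(received["content"].keys())
    )

assert looks_like_redacted_copy(
    {"content": {}, "type": "m.room.message"},
    {"content": {}, "type": "m.room.message"},
)
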
@@ -182,121 +168,21 @@ class FederationBase(object):
            failure.trap(SynapseError)
            with logcontext.PreserveLoggingContext(ctx):
                logger.warn(
                    "Signature check failed for %s: %s",
                    pdu.event_id, failure.getErrorMessage(),
                    "Signature check failed for %s",
                    pdu.event_id,
                )
            return failure

        for deferred, pdu in zip(deferreds, pdus):
        for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
            deferred.addCallbacks(
                callback, errback,
                callbackArgs=[pdu],
                callbackArgs=[pdu, redacted],
                errbackArgs=[pdu],
            )

        return deferreds


class PduToCheckSig(namedtuple("PduToCheckSig", [
    "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
])):
    pass


def _check_sigs_on_pdus(keyring, pdus):
    """Check that the given events are correctly signed

    Args:
        keyring (synapse.crypto.Keyring): keyring object to do the checks
        pdus (Collection[EventBase]): the events to be checked

    Returns:
        List[Deferred]: a Deferred for each event in pdus, which will either succeed if
           the signatures are valid, or fail (with a SynapseError) if not.
    """

    # (currently this is written assuming the v1 room structure; we'll probably want a
    # separate function for checking v2 rooms)

    # we want to check that the event is signed by:
    #
    # (a) the server which created the event_id
    #
    # (b) the sender's server.
    #
    #     - except in the case of invites created from a 3pid invite, which are exempt
    #       from this check, because the sender has to match that of the original 3pid
    #       invite, but the event may come from a different HS, for reasons that I don't
    #       entirely grok (why do the senders have to match? and if they do, why doesn't the
    #       joining server ask the inviting server to do the switcheroo with
    #       exchange_third_party_invite?).
    #
    #       That's pretty awful, since redacting such an invite will render it invalid
    #       (because it will then look like a regular invite without a valid signature),
    #       and signatures are *supposed* to be valid whether or not an event has been
    #       redacted. But this isn't the worst of the ways that 3pid invites are broken.
    #
    # let's start by getting the domain for each pdu, and flattening the event back
    # to JSON.
    pdus_to_check = [
        PduToCheckSig(
            pdu=p,
            redacted_pdu_json=prune_event(p).get_pdu_json(),
            event_id_domain=get_domain_from_id(p.event_id),
            sender_domain=get_domain_from_id(p.sender),
            deferreds=[],
        )
        for p in pdus
    ]

    # first make sure that the event is signed by the event_id's domain
    deferreds = keyring.verify_json_objects_for_server([
        (p.event_id_domain, p.redacted_pdu_json)
        for p in pdus_to_check
    ])

    for p, d in zip(pdus_to_check, deferreds):
        p.deferreds.append(d)

    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks.
    pdus_to_check_sender = [
        p for p in pdus_to_check
        if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
    ]

    more_deferreds = keyring.verify_json_objects_for_server([
        (p.sender_domain, p.redacted_pdu_json)
        for p in pdus_to_check_sender
    ])

    for p, d in zip(pdus_to_check_sender, more_deferreds):
        p.deferreds.append(d)

    # replace lists of deferreds with single Deferreds
    return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]

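The long comment above boils down to a small decision about which domains must have signed a (v1) event. A sketch with plain strings standing in for the PDU fields:

def domains_to_check(event_id_domain, sender_domain, is_3pid_invite):
    domains = {event_id_domain}        # (a) the server that created the event_id
    if sender_domain != event_id_domain and not is_3pid_invite:
        domains.add(sender_domain)     # (b) the sender's server
    return domains

assert domains_to_check("a.example", "b.example", False) == {"a.example", "b.example"}
assert domains_to_check("a.example", "b.example", True) == {"a.example"}
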
def _flatten_deferred_list(deferreds):
    """Given a list of one or more deferreds, either return the single deferred, or
    combine into a DeferredList.
    """
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    else:
        assert len(deferreds) == 1
        return deferreds[0]


def _is_invite_via_3pid(event):
    return (
        event.type == EventTypes.Member
        and event.membership == Membership.INVITE
        and "third_party_invite" in event.content
    )


def event_from_pdu_json(pdu_json, outlier=False):
    """Construct a FrozenEvent from an event json received over federation
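_flatten_deferred_list keeps the common single-check case cheap while still failing fast when any of several checks fails: with fireOnOneErrback the combined Deferred errbacks as soon as one constituent does. Usage sketch (standalone copy of the helper, run under a Twisted-capable Python):

from twisted.internet import defer
from twisted.internet.defer import DeferredList

def flatten_deferred_list(deferreds):
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    assert len(deferreds) == 1
    return deferreds[0]

d = flatten_deferred_list([defer.succeed(1), defer.succeed(2)])
d.addCallback(print)  # prints [(True, 1), (True, 2)]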