Compare commits: hhs-7...matthew/de
109 commits
| Author | SHA1 | Date |
|---|---|---|
| | 1ef1b716e2 | |
| | 9498cd3e7b | |
| | c7503f8f33 | |
| | 4ff8486f0f | |
| | 2669e494e0 | |
| | b6d8a808a4 | |
| | 0cb5d34756 | |
| | 650761666d | |
| | aa2a4b4b42 | |
| | 022469d819 | |
| | 45d06c754a | |
| | dbd0821c43 | |
| | 0476852fc6 | |
| | 1d11d9323d | |
| | 261e4f2542 | |
| | 11728561f3 | |
| | 9d57abcadd | |
| | cb0bbde981 | |
| | abc97bd1de | |
| | ee238254a0 | |
| | 0125b5d002 | |
| | fe265fe990 | |
| | 7735eee41d | |
| | 3d0faa39fb | |
| | fd28d13e19 | |
| | d18731e252 | |
| | 81beae30b8 | |
| | 11f1bace3c | |
| | 1e8cfc9e77 | |
| | 488ed3e444 | |
| | c3ec84dbcd | |
| | 0783801659 | |
| | 9f2fd29c14 | |
| | 6372dff771 | |
| | b3e346f40c | |
| | fb47ce3e6a | |
| | debf04556b | |
| | 907a62df28 | |
| | 41b987cbc5 | |
| | 5c74ab4064 | |
| | 06820250c9 | |
| | 383c4ae59c | |
| | f639ac143d | |
| | ad0424bab0 | |
| | 2992125561 | |
| | ef56b6e27c | |
| | 53d6245529 | |
| | 25e471dac3 | |
| | 76fca1730e | |
| | 32e4420a66 | |
| | 79b2583f1b | |
| | 8a24c4eee5 | |
| | f93cb7410d | |
| | 50d5a97c1b | |
| | c06932a029 | |
| | 3a62cacfb0 | |
| | 4d55b16faa | |
| | 105709bf32 | |
| | d7fad867fa | |
| | 8fddcf703e | |
| | e2adb360eb | |
| | 47ed4a4aa7 | |
| | 7fafa838ae | |
| | de341bec1b | |
| | 643c89d497 | |
| | 6554253f48 | |
| | 3add16df49 | |
| | dde01efbcb | |
| | 22e416b726 | |
| | b4b7c80181 | |
| | 5fc3477fd3 | |
| | 8743f42b49 | |
| | 7285afa4be | |
| | b22a53e357 | |
| | 3c446d0a81 | |
| | 240e940c3f | |
| | 969ed2e49d | |
| | 1147ce7e18 | |
| | 0d2b7fdcec | |
| | 4e12b10c7c | |
| | e654230a51 | |
| | ef5193e0cb | |
| | 7b3959c7f3 | |
| | 2e4a6c5aab | |
| | e3eb2cfe8b | |
| | 5c341c99f6 | |
| | 739d3500fe | |
| | 0e2d70e101 | |
| | 82c4fd7226 | |
| | e446077478 | |
| | d82c89ac22 | |
| | 75b25b3f1f | |
| | 1df10d8814 | |
| | 8f9340d248 | |
| | c5034cd4b0 | |
| | f7f937d051 | |
| | e52b5d94a9 | |
| | d90f27a21f | |
| | 03cf9710e3 | |
| | 1dcdd8d568 | |
| | 4344fb1faf | |
| | 846577ebde | |
| | 3869981227 | |
| | fa80b492a5 | |
| | c776c52eed | |
| | b424c16f50 | |
| | 313a489fc9 | |
| | 4b090cb273 | |
| | 3f79378d4b | |
@@ -9,8 +9,6 @@ jobs:
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2postgres:
    machine: true
    steps:
@@ -20,34 +18,6 @@ jobs:
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy2merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy2postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

  sytestpy3:
    machine: true
    steps:
@@ -57,8 +27,6 @@ jobs:
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgres:
    machine: true
    steps:
@@ -68,32 +36,6 @@ jobs:
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3merged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs
  sytestpy3postgresmerged:
    machine: true
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
      - store_test_results:
          path: logs

workflows:
  version: 2
@@ -101,14 +43,6 @@ workflows:
    jobs:
      - sytestpy2
      - sytestpy2postgres
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master/
      # Currently broken while the Python 3 port is incomplete
      # - sytestpy3
      # - sytestpy3postgres
@@ -1,27 +0,0 @@
#!/usr/bin/env bash

set -e

# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> "$BASH_ENV"
source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is!"
    exit 1
fi

# Get the reference, using the GitHub API
GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`

# Show what we are before
git show -s

# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE

# Show what we are after.
git show -s
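The deleted script's fallback relies on bash parameter expansion: `${CIRCLE_PULL_REQUEST##*/}` strips everything up to the last slash of the pull-request URL, leaving just the PR number. A minimal sketch of the same fallback in Python (the URL shape is the usual GitHub one; the value is illustrative, not taken from this diff):

```python
import os

# Prefer CIRCLE_PR_NUMBER if CircleCI set it; otherwise take the last path
# segment of the PR URL, e.g.
# "https://github.com/matrix-org/synapse/pull/1234" -> "1234".
pr_number = os.environ.get("CIRCLE_PR_NUMBER") or os.environ.get(
    "CIRCLE_PULL_REQUEST", ""
).rsplit("/", 1)[-1]

if not pr_number:
    raise SystemExit("Can't figure out what the PR number is!")
```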
@@ -3,5 +3,6 @@ Dockerfile
.gitignore
demo/etc
tox.ini
synctl
.git/*
.tox/*

1  .gitignore (vendored)
@@ -44,7 +44,6 @@ media_store/
build/
venv/
venv*/
*venv/

localhost-800*/
static/client/register/register_config.js

@@ -8,6 +8,9 @@ before_script:
  - git remote set-branches --add origin develop
  - git fetch origin develop

services:
  - postgresql

matrix:
  fast_finish: true
  include:
@@ -22,8 +25,6 @@ matrix:

  - python: 2.7
    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
    services:
      - postgresql

  - python: 3.6
    env: TOX_ENV=py36
@@ -34,6 +35,10 @@ matrix:
  - python: 3.6
    env: TOX_ENV=check-newsfragment

  allow_failures:
    - python: 2.7
      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"

install:
  - pip install tox
74  CHANGES.md
@@ -1,77 +1,3 @@
Synapse 0.33.4 (2018-09-07)
===========================

Internal Changes
----------------

- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))


Synapse 0.33.4rc2 (2018-09-06)
==============================

Pull in security fixes from v0.33.3.1


Synapse 0.33.3.1 (2018-09-06)
=============================

SECURITY FIXES
--------------

- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796))


Internal Changes
----------------

- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))


Synapse 0.33.4rc1 (2018-09-04)
==============================

Features
--------

- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659))
- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680))
- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724))
- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749))
- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). ([\#3751](https://github.com/matrix-org/synapse/issues/3751))


Bugfixes
--------

- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722))
- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747))
- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753))
- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754))
- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756))
- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. ([\#3758](https://github.com/matrix-org/synapse/issues/3758))
- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760))
- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768))
- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777))

Internal Changes
----------------

- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378))
- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673))
- The synapse.storage module has been ported to Python 3. ([\#3725](https://github.com/matrix-org/synapse/issues/3725))
- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726))
- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727))
- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730))
- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734))
- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735))
- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737))
- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740))
- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746))
- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764))


Synapse 0.33.3 (2018-08-22)
===========================


@@ -59,10 +59,9 @@ To create a changelog entry, make a new file in the ``changelog.d``
file named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
the file is your changelog entry, which can contain RestructuredText
formatting. A note of contributors is welcomed in changelogs for
non-misc changes (the content of misc changes is not displayed).

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
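Following the guideline above, a small hedged sketch of creating such an entry (PR #1234 is the document's own example; the entry text is a placeholder):

```python
from pathlib import Path

# Create changelog.d/1234.bugfix containing a one-line entry; the real
# entry should describe the actual change made in the PR.
entry = Path("changelog.d") / "1234.bugfix"
entry.parent.mkdir(exist_ok=True)
entry.write_text("Fix a bug in the example module.\n")
```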
17  README.rst
@@ -167,6 +167,11 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/

Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see
https://github.com/EMnify/matrix-synapse-auto-deploy
for details.

Configuring synapse
-------------------

@@ -742,18 +747,6 @@ so an example nginx configuration might look like::
    }
    }

and an example apache configuration may look like::

    <VirtualHost *:443>
        SSLEngine on
        ServerName matrix.example.com;

        <Location /_matrix>
            ProxyPass http://127.0.0.1:8008/_matrix nocanon
            ProxyPassReverse http://127.0.0.1:8008/_matrix
        </Location>
    </VirtualHost>

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
@@ -1 +0,0 @@
CircleCI tests now run on the potential merge of a PR.
@@ -1 +0,0 @@
http/ is now ported to Python 3.
@@ -1 +0,0 @@
Remove connection ID for replication prometheus metrics, as it creates a large number of new series.
@@ -1 +0,0 @@
Improve human readable error messages for threepid registration/account update
@@ -1 +0,0 @@
Implement `event_format` filter param in `/sync`
@@ -1 +0,0 @@
Make /sync slightly faster by avoiding needless copies
@@ -1 +0,0 @@
guest users should not be part of mau total
@@ -1 +0,0 @@
handlers/ is now ported to Python 3.
@@ -1 +0,0 @@
Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted.
@@ -1 +0,0 @@
Limit the number of PDUs/EDUs per federation transaction
@@ -1 +0,0 @@
Only start postgres instance for postgres tests on Travis CI
@@ -1 +0,0 @@
tests/ is now ported to Python 3.
@@ -1 +0,0 @@
Fix existing room tags not coming down sync when joining a room
@@ -1 +0,0 @@
crypto/ is now ported to Python 3.
@@ -1 +0,0 @@
rest/ is now ported to Python 3.
@@ -1 +0,0 @@
Fix jwt import check
@@ -1 +0,0 @@
add some logging for the keyring queue
@@ -1 +0,0 @@
speed up lazy loading by 2-3x
@@ -1 +0,0 @@
Improved Dockerfile to remove build requirements after building reducing the image size.
@@ -1 +0,0 @@
fix VOIP crashes under Python 3 (#3821)
@@ -1 +0,0 @@
Disable lazy loading for incremental syncs for now
@@ -1 +0,0 @@
Fix manhole so that it works with latest openssh clients
@@ -1 +0,0 @@
Fix outbound requests occasionally wedging, which can result in federation breaking between servers.
@@ -1 +0,0 @@
Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users
@@ -1 +0,0 @@
federation/ is now ported to Python 3.
@@ -1 +0,0 @@
Show heroes if room name/canonical alias has been deleted
@@ -1 +0,0 @@
Log when we retry outbound requests
@@ -1,8 +1,6 @@
FROM docker.io/python:2-alpine3.8

COPY . /synapse

RUN apk add --no-cache --virtual .build_deps \
RUN apk add --no-cache --virtual .nacl_deps \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
@@ -10,16 +8,13 @@ RUN apk add --no-cache --virtual .build_deps \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        zlib-dev \
    && cd /synapse \
    && apk add --no-cache --virtual .runtime_deps \
        libffi \
        libjpeg-turbo \
        libressl \
        libxslt \
        libpq \
        zlib \
        su-exec \
        su-exec \
        zlib-dev

COPY . /synapse

# A wheel cache may be provided in ./cache for faster build
RUN cd /synapse \
    && pip install --upgrade \
        lxml \
        pip \
@@ -31,9 +26,8 @@ RUN apk add --no-cache --virtual .build_deps \
    && rm -rf \
        setup.cfg \
        setup.py \
        synapse \
    && apk del .build_deps

        synapse

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp
@@ -74,7 +74,7 @@ replication endpoints that it's talking to on the main synapse process.
``worker_replication_port`` should point to the TCP replication listener port and
``worker_replication_http_port`` should point to the HTTP replication port.

Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
Currently, only the ``event_creator`` worker requires specifying
``worker_replication_http_port``.

For instance::
@@ -265,7 +265,6 @@ Handles some event creation. It can handle REST endpoints matching::
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
    ^/_matrix/client/(api/v1|r0|unstable)/join/
    ^/_matrix/client/(api/v1|r0|unstable)/profile/

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.
@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
7  res/templates-dinsic/mail-Vector.css (Normal file)
@@ -0,0 +1,7 @@
.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}

156  res/templates-dinsic/mail.css (Normal file)
@@ -0,0 +1,156 @@
body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    font-color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}

45  res/templates-dinsic/notif.html (Normal file)
@@ -0,0 +1,45 @@
{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">Voir {{ room.title }}</a>
    </td>
    <td></td>
</tr>

16  res/templates-dinsic/notif.txt (Normal file)
@@ -0,0 +1,16 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

Voir {{ room.title }} à {{ notif.link }}

55  res/templates-dinsic/notif_mail.html (Normal file)
@@ -0,0 +1,55 @@
<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Bonjour {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Se désinscrire</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>

10  res/templates-dinsic/notif_mail.txt (Normal file)
@@ -0,0 +1,10 @@
Bonjour {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }}

33  res/templates-dinsic/room.html (Normal file)
@@ -0,0 +1,33 @@
<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Rejoindre la conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>

9  res/templates-dinsic/room.txt (Normal file)
@@ -0,0 +1,9 @@
{{ room.title }}

{% if room.invite %}
Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
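A minimal sketch of how templates like the ones added above might be rendered with jinja2 (not Synapse's actual mailer code; `format_ts` and `mxc_to_http` are custom filters Synapse registers at runtime, so placeholder lambdas stand in for them, and all render values are illustrative):

```python
from jinja2 import Environment, FileSystemLoader

# Load the dinsic templates and register stand-ins for the custom filters
# they reference.
env = Environment(loader=FileSystemLoader("res/templates-dinsic"))
env.filters["format_ts"] = lambda ts, fmt: "(time)"
env.filters["mxc_to_http"] = lambda mxc, w, h, method=None: "about:blank"

template = env.get_template("notif_mail.html")
html = template.render(
    app_name="Vector",           # picks up mail-Vector.css via the include
    user_display_name="Alice",   # illustrative values from here down
    summary_text="...",
    rooms=[],
    unsubscribe_link="#",
    reason={
        "now": 0, "room_name": "Example", "received_at": 0,
        "delay_before_mail_ms": 600000, "last_sent_ts": None,
    },
)
print(html[:80])
```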
@@ -17,14 +17,13 @@ ignore =
[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik
# doesn't like it. E203 is contrary to PEP8. E731 is silly.
ignore = W503,E203,E731
# doesn't like it. E203 is contrary to PEP8.
ignore = W503,E203

[flake8]
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90
ignore=W503,E203,E731

[isort]
line_length = 89
@@ -17,4 +17,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.33.4"
__version__ = "0.33.3"
@@ -26,7 +26,6 @@ import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
from synapse.util.caches.lrucache import LruCache
@@ -776,56 +775,34 @@ class Auth(object):
        )

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None):
    def check_auth_blocking(self, user_id=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort

            threepid(dict|None): If present, checks for presence against configured
                reserved threepid. Used in cases where the user is trying register
                with a MAU blocked server, normally they would be rejected but their
                threepid is on the reserved list. user_id and
                threepid should never be set at the same time.
        """

        # Never fail an auth check for the server notices users
        # This can be a problem where event creation is prohibited due to blocking
        if user_id == self.hs.config.server_notices_mxid:
            return

        if self.hs.config.hs_disabled:
            raise ResourceLimitError(
                403, self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self.hs.config.admin_contact,
                errcode=Codes.RESOURCE_LIMIT_EXCEED,
                admin_uri=self.hs.config.admin_uri,
                limit_type=self.hs.config.hs_disabled_limit_type
            )
        if self.hs.config.limit_usage_by_mau is True:
            assert not (user_id and threepid)

            # If the user is already part of the MAU cohort or a trial user
            # If the user is already part of the MAU cohort
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return

                is_trial = yield self.store.is_trial_user(user_id)
                if is_trial:
                    return
            elif threepid:
                # If the user does not exist yet, but is signing up with a
                # reserved threepid then pass auth check
                if is_threepid_reserved(self.hs.config, threepid):
                    return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self.hs.config.max_mau_value:
                raise ResourceLimitError(
                    403, "Monthly Active User Limit Exceeded",
                    admin_contact=self.hs.config.admin_contact,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,

                    admin_uri=self.hs.config.admin_uri,
                    errcode=Codes.RESOURCE_LIMIT_EXCEED,
                    limit_type="monthly_active_user"
                )
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -71,6 +71,7 @@ class EventTypes(object):
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"
    Encryption = "m.room.encryption"

    # These are used for validation
    Message = "m.room.message"
@@ -78,7 +79,6 @@ class EventTypes(object):
    Name = "m.room.name"

    ServerACL = "m.room.server_acl"
    Pinned = "m.room.pinned_events"


class RejectedReason(object):
@@ -98,17 +98,9 @@ class ThirdPartyEntityKind(object):
    LOCATION = "location"


class RoomVersions(object):
    V1 = "1"
    VDH_TEST = "vdh-test-version"


# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1
DEFAULT_ROOM_VERSION = "1"

# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}

ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"}
@@ -56,7 +56,7 @@ class Codes(object):
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
    CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
    RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
    RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED"
    UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"

@@ -238,11 +238,11 @@ class ResourceLimitError(SynapseError):
    """
    def __init__(
        self, code, msg,
        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
        admin_contact=None,
        errcode=Codes.RESOURCE_LIMIT_EXCEED,
        admin_uri=None,
        limit_type=None,
    ):
        self.admin_contact = admin_contact
        self.admin_uri = admin_uri
        self.limit_type = limit_type
        super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)

@@ -250,7 +250,7 @@ class ResourceLimitError(SynapseError):
        return cs_error(
            self.msg,
            self.errcode,
            admin_contact=self.admin_contact,
            admin_uri=self.admin_uri,
            limit_type=self.limit_type
        )
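Illustratively, with the newer names, the error body serialized above would look something like the following (all values are stand-ins; the admin contact mirrors the config sample that appears later in this diff):

```python
# Hypothetical serialized form of a ResourceLimitError (illustrative values).
{
    "error": "Monthly Active User Limit Exceeded",
    "errcode": "M_RESOURCE_LIMIT_EXCEEDED",
    "admin_contact": "mailto:admin@server.com",
    "limit_type": "monthly_active_user",
}
```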
@@ -251,7 +251,6 @@ class FilterCollection(object):
            "include_leave", False
        )
        self.event_fields = filter_json.get("event_fields", [])
        self.event_format = filter_json.get("event_format", "client")

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -51,7 +51,10 @@ class AppserviceSlaveStore(


class AppserviceServer(HomeServer):
    DATASTORE_CLASS = AppserviceSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -74,7 +74,10 @@ class ClientReaderSlavedStore(


class ClientReaderServer(HomeServer):
    DATASTORE_CLASS = ClientReaderSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -45,11 +45,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.profile import (
    ProfileAvatarURLRestServlet,
    ProfileDisplaynameRestServlet,
    ProfileRestServlet,
)
from synapse.rest.client.v1.room import (
    JoinRoomAliasServlet,
    RoomMembershipRestServlet,
@@ -58,7 +53,6 @@ from synapse.rest.client.v1.room import (
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
@@ -68,9 +62,6 @@ logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
    UserDirectoryStore,
    DirectoryStore,
    SlavedTransactionStore,
    SlavedProfileStore,
@@ -90,7 +81,10 @@ class EventCreatorSlavedStore(


class EventCreatorServer(HomeServer):
    DATASTORE_CLASS = EventCreatorSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
@@ -107,9 +101,6 @@ class EventCreatorServer(HomeServer):
        RoomMembershipRestServlet(self).register(resource)
        RoomStateEventRestServlet(self).register(resource)
        JoinRoomAliasServlet(self).register(resource)
        ProfileAvatarURLRestServlet(self).register(resource)
        ProfileDisplaynameRestServlet(self).register(resource)
        ProfileRestServlet(self).register(resource)
        resources.update({
            "/_matrix/client/r0": resource,
            "/_matrix/client/unstable": resource,

@@ -72,7 +72,10 @@ class FederationReaderSlavedStore(


class FederationReaderServer(HomeServer):
    DATASTORE_CLASS = FederationReaderSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -78,7 +78,10 @@ class FederationSenderSlaveStore(


class FederationSenderServer(HomeServer):
    DATASTORE_CLASS = FederationSenderSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -148,7 +148,10 @@ class FrontendProxySlavedStore(


class FrontendProxyServer(HomeServer):
    DATASTORE_CLASS = FrontendProxySlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
from synapse.storage import DataStore, are_all_users_on_domain
from synapse.storage import are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -111,8 +111,6 @@ def build_resource_for_web_client(hs):


class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore

    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
@@ -307,10 +305,6 @@ class SynapseHomeServer(HomeServer):
# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
    "synapse_admin_mau:registered_reserved_users",
    "Registered users with reserved threepids"
)


def setup(config_options):
@@ -362,13 +356,13 @@ def setup(config_options):
    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)
        hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
        db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
@@ -535,14 +529,10 @@ def run(hs):

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        current_mau_count = 0
        reserved_count = 0
        store = hs.get_datastore()
        count = 0
        if hs.config.limit_usage_by_mau:
            current_mau_count = yield store.get_monthly_active_count()
            reserved_count = yield store.get_registered_reserved_users_count()
        current_mau_gauge.set(float(current_mau_count))
        registered_reserved_users_mau_gauge.set(float(reserved_count))
            count = yield hs.get_datastore().get_monthly_active_count()
        current_mau_gauge.set(float(count))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    hs.get_datastore().initialise_reserved_users(

@@ -60,7 +60,10 @@ class MediaRepositorySlavedStore(


class MediaRepositoryServer(HomeServer):
    DATASTORE_CLASS = MediaRepositorySlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -78,7 +78,10 @@ class PusherSlaveStore(


class PusherServer(HomeServer):
    DATASTORE_CLASS = PusherSlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

@@ -249,7 +249,10 @@ class SynchrotronApplicationService(object):


class SynchrotronServer(HomeServer):
    DATASTORE_CLASS = SynchrotronSlavedStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]

@@ -94,7 +94,10 @@ class UserDirectorySlaveStore(


class UserDirectoryServer(HomeServer):
    DATASTORE_CLASS = UserDirectorySlaveStore
    def setup(self):
        logger.info("Setting up.")
        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
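Every worker hunk above swaps between the same two shapes. A self-contained sketch of the pattern, with stub classes; the assumption (not shown in this diff) is that the real HomeServer base instantiates DATASTORE_CLASS during its own setup:

```python
import logging

logger = logging.getLogger(__name__)


class HomeServer(object):
    """Stub standing in for synapse.server.HomeServer (assumption)."""

    DATASTORE_CLASS = None

    def get_db_conn(self, run_new_connection=True):
        return object()  # stand-in for a DB connection

    def setup(self):
        # New style: the base class builds the datastore from
        # DATASTORE_CLASS, so each worker only declares a class attribute.
        logger.info("Setting up.")
        self.datastore = self.DATASTORE_CLASS(self.get_db_conn(), self)
        logger.info("Finished setting up.")


class SlavedStore(object):
    def __init__(self, db_conn, hs):
        self.db_conn, self.hs = db_conn, hs


class PusherServer(HomeServer):
    # Old style (restored by this diff) would instead override setup()
    # and construct SlavedStore there by hand.
    DATASTORE_CLASS = SlavedStore
```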
@@ -13,8 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from six.moves import urllib
import urllib

from prometheus_client import Counter

@@ -99,7 +98,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    def query_user(self, service, user_id):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
            response = yield self.get_json(uri, {
@@ -120,7 +119,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    def query_alias(self, service, alias):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {
@@ -154,7 +153,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                service.url,
                APP_SERVICE_PREFIX,
                kind,
                urllib.parse.quote(protocol)
                urllib.quote(protocol)
            )
            try:
                response = yield self.get_json(uri, fields)
@@ -189,7 +188,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            uri = "%s%s/thirdparty/protocol/%s" % (
                service.url,
                APP_SERVICE_PREFIX,
                urllib.parse.quote(protocol)
                urllib.quote(protocol)
            )
            try:
                info = yield self.get_json(uri, {})
@@ -229,7 +228,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        txn_id = str(txn_id)

        uri = service.url + ("/transactions/%s" %
                             urllib.parse.quote(txn_id))
                             urllib.quote(txn_id))
        try:
            yield self.put_json(
                uri=uri,
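The repeated change above swaps Python 2's `urllib.quote` for the py2/py3-compatible `six.moves` spelling. A minimal illustration (not Synapse code; the user ID is an example value):

```python
# six.moves.urllib exposes urllib.parse.quote under both Python 2 and 3.
from six.moves import urllib

user_id = "@alice:example.com"
print(urllib.parse.quote(user_id))  # -> "%40alice%3Aexample.com"
```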
@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .groups import GroupsConfig
from .jwt_config import JWTConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
@@ -33,7 +33,15 @@ class RegistrationConfig(Config):

        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
        self.check_is_for_allowed_local_3pids = config.get(
            "check_is_for_allowed_local_3pids", None
        )
        self.allow_invited_3pids = config.get("allow_invited_3pids", False)

        self.disable_3pid_changes = config.get("disable_3pid_changes", False)

        self.registration_shared_secret = config.get("registration_shared_secret")
        self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -45,6 +53,15 @@ class RegistrationConfig(Config):

        self.auto_join_rooms = config.get("auto_join_rooms", [])

        self.disable_set_displayname = config.get("disable_set_displayname", False)
        self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)

        self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
        if not isinstance(self.replicate_user_profiles_to, list):
            self.replicate_user_profiles_to = [self.replicate_user_profiles_to, ]

        self.chain_register = config.get("chain_register", None)

    def default_config(self, **kwargs):
        registration_shared_secret = random_string_with_symbols(50)

@@ -60,9 +77,26 @@ class RegistrationConfig(Config):
        # - email
        # - msisdn

        # Derive the user's matrix ID from a type of 3PID used when registering.
        # This overrides any matrix ID the user proposes when calling /register
        # The 3PID type should be present in registrations_require_3pid to avoid
        # users failing to register if they don't specify the right kind of 3pid.
        #
        # register_mxid_from_3pid: email

        # Mandate that users are only allowed to associate certain formats of
        # 3PIDs with accounts on this server.
        #
        # Use an Identity Server to establish which 3PIDs are allowed to register?
        # Overrides allowed_local_3pids below.
        # check_is_for_allowed_local_3pids: matrix.org
        #
        # If you are using an IS you can also check whether that IS registers
        # pending invites for the given 3PID (and then allow it to sign up on
        # the platform):
        #
        # allow_invited_3pids: False
        #
        # allowed_local_3pids:
        #     - medium: email
        #       pattern: ".*@matrix\\.org"
@@ -71,6 +105,11 @@ class RegistrationConfig(Config):
        #     - medium: msisdn
        #       pattern: "\\+44"

        # If true, stop users from trying to change the 3PIDs associated with
        # their accounts.
        #
        # disable_3pid_changes: False

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"
@@ -94,10 +133,32 @@ class RegistrationConfig(Config):
            - vector.im
            - riot.im

        # If enabled, user IDs, display names and avatar URLs will be replicated
        # to this server whenever they change.
        # This is an experimental API currently implemented by sydent to support
        # cross-homeserver user directories.
        # replicate_user_profiles_to: example.com

        # If specified, attempt to replay registrations on the given target
        # homeserver and identity server. The HS is authed via a given shared secret
        # chain_register:
        #   hs: https://shadow.example.com
        #   hs_shared_secret: 12u394refgbdhivsia
        #   is: https://shadow-is.example.com

        # If enabled, don't let users set their own display names/avatars
        # other than for the very first time (unless they are a server admin).
        # Useful when provisioning users based on the contents of a 3rd party
        # directory and to avoid ambiguities.
        #
        # disable_set_displayname: False
        # disable_set_avatar_url: False

        # Users who register on this homeserver will automatically be joined
        # to these rooms
        #auto_join_rooms:
        #    - "#example:example.com"

        """ % locals()

    def add_arguments(self, parser):
@@ -77,15 +77,10 @@ class ServerConfig(Config):
        self.max_mau_value = config.get(
            "max_mau_value", 0,
        )

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get(
            "mau_trial_days", 0,
        )

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
@@ -93,7 +88,7 @@ class ServerConfig(Config):

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)
        self.admin_uri = config.get("admin_uri", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
@@ -357,7 +352,7 @@ class ServerConfig(Config):
        # Homeserver blocking
        #
        # How to reach the server admin, used in ResourceLimitError
        # admin_contact: 'mailto:admin@server.com'
        # admin_uri: 'mailto:admin@server.com'
        #
        # Global block config
        #
@@ -370,7 +365,6 @@ class ServerConfig(Config):
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
        # mau_trial_days: 2
        #
        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
@@ -404,23 +398,6 @@ class ServerConfig(Config):
                         " service on the given port.")


def is_threepid_reserved(config, threepid):
    """Check the threepid against the reserved threepid config
    Args:
        config(ServerConfig) - to access server config attributes
        threepid(dict) - The threepid to test for

    Returns:
        boolean Is the threepid undertest reserved_user
    """

    for tp in config.mau_limits_reserved_threepids:
        if (threepid['medium'] == tp['medium']
                and threepid['address'] == tp['address']):
            return True
    return False


def read_gc_thresholds(thresholds):
    """Reads the three integer thresholds for garbage collection. Ensures that
    the thresholds are integers if thresholds are supplied.
@@ -23,11 +23,15 @@ class UserDirectoryConfig(Config):

    def read_config(self, config):
        self.user_directory_search_all_users = False
        self.user_directory_defer_to_id_server = None
        user_directory_config = config.get("user_directory", None)
        if user_directory_config:
            self.user_directory_search_all_users = (
                user_directory_config.get("search_all_users", False)
            )
            self.user_directory_defer_to_id_server = (
                user_directory_config.get("defer_to_id_server", None)
            )

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
@@ -41,4 +45,9 @@ class UserDirectoryConfig(Config):
        #
        #user_directory:
        #   search_all_users: false
        #
        # If this is set, user search will be delegated to this ID server instead
        # of synapse performing the search itself.
        # This is an experimental API.
        # defer_to_id_server: id.example.com
        """
@@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object):

    def get_options(self, host):
        return ClientTLSOptions(
            host,
            host.decode('utf-8'),
            CertificateOptions(verify=False).getContext()
        )
@@ -18,9 +18,7 @@ import logging
from canonicaljson import json

from twisted.internet import defer, reactor
from twisted.internet.error import ConnectError
from twisted.internet.protocol import Factory
from twisted.names.error import DomainError
from twisted.web.http import HTTPClient

from synapse.http.endpoint import matrix_federation_endpoint
@@ -49,14 +47,12 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
            server_response, server_certificate = yield protocol.remote_key
            defer.returnValue((server_response, server_certificate))
        except SynapseKeyClientError as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
            if e.status.startswith(b"4"):
            logger.exception("Error getting key for %r" % (server_name,))
            if e.status.startswith("4"):
                # Don't retry for 4xx responses.
                raise IOError("Cannot get key for %r" % server_name)
        except (ConnectError, DomainError) as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
        except Exception as e:
            logger.exception("Error getting key for %r", server_name)
            logger.exception(e)
    raise IOError("Cannot get key for %r" % server_name)


@@ -82,12 +78,6 @@ class SynapseKeyClientProtocol(HTTPClient):
        self._peer = self.transport.getPeer()
        logger.debug("Connected to %s", self._peer)

        if not isinstance(self.path, bytes):
            self.path = self.path.encode('ascii')

        if not isinstance(self.host, bytes):
            self.host = self.host.encode('ascii')

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)
@@ -16,10 +16,9 @@
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import urllib
|
||||
from collections import namedtuple
|
||||
|
||||
from six.moves import urllib
|
||||
|
||||
from signedjson.key import (
|
||||
decode_verify_key_bytes,
|
||||
encode_verify_key_base64,
|
||||
@@ -41,7 +40,6 @@ from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.crypto.keyclient import fetch_server_key
|
||||
from synapse.util import logcontext, unwrapFirstError
|
||||
from synapse.util.logcontext import (
|
||||
LoggingContext,
|
||||
PreserveLoggingContext,
|
||||
preserve_fn,
|
||||
run_in_background,
|
||||
@@ -218,34 +216,23 @@ class Keyring(object):
|
||||
servers have completed. Follows the synapse rules of logcontext
|
||||
preservation.
|
||||
"""
|
||||
loop_count = 1
|
||||
while True:
|
||||
wait_on = [
|
||||
(server_name, self.key_downloads[server_name])
|
||||
self.key_downloads[server_name]
|
||||
for server_name in server_names
|
||||
if server_name in self.key_downloads
|
||||
]
|
||||
if not wait_on:
|
||||
if wait_on:
|
||||
with PreserveLoggingContext():
|
||||
yield defer.DeferredList(wait_on)
|
||||
else:
|
||||
break
|
||||
logger.info(
|
||||
"Waiting for existing lookups for %s to complete [loop %i]",
|
||||
[w[0] for w in wait_on], loop_count,
|
||||
)
|
||||
with PreserveLoggingContext():
|
||||
yield defer.DeferredList((w[1] for w in wait_on))
|
||||
|
||||
loop_count += 1
|
||||
|
||||
ctx = LoggingContext.current_context()
|
||||
|
||||
def rm(r, server_name_):
|
||||
with PreserveLoggingContext(ctx):
|
||||
logger.debug("Releasing key lookup lock on %s", server_name_)
|
||||
self.key_downloads.pop(server_name_, None)
|
||||
self.key_downloads.pop(server_name_, None)
|
||||
return r
|
||||
|
||||
for server_name, deferred in server_to_deferred.items():
|
||||
logger.debug("Got key lookup lock on %s", server_name)
|
||||
self.key_downloads[server_name] = deferred
|
||||
deferred.addBoth(rm, server_name)
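
A compact restatement of the control flow on the new side of this hunk (illustrative only, with the logging and loop counter stripped out): each in-flight download is tracked in key_downloads, and callers block until no tracked download remains for the servers they care about.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def wait_for_downloads(server_names, key_downloads):
        # loop until no deferred is tracked for any requested server
        while True:
            wait_on = [
                key_downloads[name]
                for name in server_names
                if name in key_downloads
            ]
            if not wait_on:
                break
            yield defer.DeferredList(wait_on)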

@@ -445,7 +432,7 @@ class Keyring(object):
            # an incoming request.
            query_response = yield self.client.post_json(
                destination=perspective_name,
                path="/_matrix/key/v2/query",
                path=b"/_matrix/key/v2/query",
                data={
                    u"server_keys": {
                        server_name: {
@@ -526,8 +513,8 @@ class Keyring(object):

        (response, tls_certificate) = yield fetch_server_key(
            server_name, self.hs.tls_client_options_factory,
            path=("/_matrix/key/v2/server/%s" % (
                urllib.parse.quote(requested_key_id),
            path=(b"/_matrix/key/v2/server/%s" % (
                urllib.quote(requested_key_id),
            )).encode("ascii"),
        )


@@ -13,20 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple

import six

from twisted.internet import defer
from twisted.internet.defer import DeferredList

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.types import get_domain_from_id
from synapse.util import logcontext, unwrapFirstError

logger = logging.getLogger(__name__)
@@ -136,45 +133,34 @@ class FederationBase(object):
            * throws a SynapseError if the signature check failed.
        The deferreds run their callbacks in the sentinel logcontext.
        """
        deferreds = _check_sigs_on_pdus(self.keyring, pdus)

        redacted_pdus = [
            prune_event(pdu)
            for pdu in pdus
        ]

        deferreds = self.keyring.verify_json_objects_for_server([
            (p.origin, p.get_pdu_json())
            for p in redacted_pdus
        ])

        ctx = logcontext.LoggingContext.current_context()

        def callback(_, pdu):
        def callback(_, pdu, redacted):
            with logcontext.PreserveLoggingContext(ctx):
                if not check_event_content_hash(pdu):
                    # let's try to distinguish between failures because the event was
                    # redacted (which are somewhat expected) vs actual ball-tampering
                    # incidents.
                    #
                    # This is just a heuristic, so we just assume that if the keys are
                    # about the same between the redacted and received events, then the
                    # received event was probably a redacted copy (but we then use our
                    # *actual* redacted copy to be on the safe side.)
                    redacted_event = prune_event(pdu)
                    if (
                        set(six.iterkeys(redacted_event)) == set(six.iterkeys(pdu)) and
                        set(six.iterkeys(redacted_event.content))
                            == set(six.iterkeys(pdu.content))
                    ):
                        logger.info(
                            "Event %s seems to have been redacted; using our redacted "
                            "copy",
                            pdu.event_id,
                        )
                    else:
                        logger.warning(
                            "Event %s content has been tampered, redacting",
                            pdu.event_id, pdu.get_pdu_json(),
                        )
                    return redacted_event
                    logger.warn(
                        "Event content has been tampered, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return redacted

                if self.spam_checker.check_event_for_spam(pdu):
                    logger.warn(
                        "Event contains spam, redacting %s: %s",
                        pdu.event_id, pdu.get_pdu_json()
                    )
                    return prune_event(pdu)
                    return redacted

                return pdu
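
The heuristic in the callback above reduces to a key-set comparison; a standalone sketch using plain dicts in place of event objects (values illustrative):

    def looks_redacted(pdu, redacted_event):
        # a failed content hash is treated as a legitimate redaction when the
        # top-level keys and the content keys match the pruned copy
        return (
            set(redacted_event.keys()) == set(pdu.keys())
            and set(redacted_event["content"].keys()) == set(pdu["content"].keys())
        )

    pdu = {"event_id": "$e:example.com", "content": {}, "signatures": {}}
    assert looks_redacted(pdu, dict(pdu))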

@@ -182,121 +168,21 @@ class FederationBase(object):
            failure.trap(SynapseError)
            with logcontext.PreserveLoggingContext(ctx):
                logger.warn(
                    "Signature check failed for %s: %s",
                    pdu.event_id, failure.getErrorMessage(),
                    "Signature check failed for %s",
                    pdu.event_id,
                )
            return failure

        for deferred, pdu in zip(deferreds, pdus):
        for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
            deferred.addCallbacks(
                callback, errback,
                callbackArgs=[pdu],
                callbackArgs=[pdu, redacted],
                errbackArgs=[pdu],
            )

        return deferreds


class PduToCheckSig(namedtuple("PduToCheckSig", [
    "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
])):
    pass


def _check_sigs_on_pdus(keyring, pdus):
    """Check that the given events are correctly signed

    Args:
        keyring (synapse.crypto.Keyring): keyring object to do the checks
        pdus (Collection[EventBase]): the events to be checked

    Returns:
        List[Deferred]: a Deferred for each event in pdus, which will either succeed if
           the signatures are valid, or fail (with a SynapseError) if not.
    """

    # (currently this is written assuming the v1 room structure; we'll probably want a
    # separate function for checking v2 rooms)

    # we want to check that the event is signed by:
    #
    #   (a) the server which created the event_id
    #
    #   (b) the sender's server.
    #
    #     - except in the case of invites created from a 3pid invite, which are exempt
    #       from this check, because the sender has to match that of the original 3pid
    #       invite, but the event may come from a different HS, for reasons that I don't
    #       entirely grok (why do the senders have to match? and if they do, why doesn't the
    #       joining server ask the inviting server to do the switcheroo with
    #       exchange_third_party_invite?).
    #
    #       That's pretty awful, since redacting such an invite will render it invalid
    #       (because it will then look like a regular invite without a valid signature),
    #       and signatures are *supposed* to be valid whether or not an event has been
    #       redacted. But this isn't the worst of the ways that 3pid invites are broken.
    #
    # let's start by getting the domain for each pdu, and flattening the event back
    # to JSON.
    pdus_to_check = [
        PduToCheckSig(
            pdu=p,
            redacted_pdu_json=prune_event(p).get_pdu_json(),
            event_id_domain=get_domain_from_id(p.event_id),
            sender_domain=get_domain_from_id(p.sender),
            deferreds=[],
        )
        for p in pdus
    ]

    # first make sure that the event is signed by the event_id's domain
    deferreds = keyring.verify_json_objects_for_server([
        (p.event_id_domain, p.redacted_pdu_json)
        for p in pdus_to_check
    ])

    for p, d in zip(pdus_to_check, deferreds):
        p.deferreds.append(d)

    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks.
    pdus_to_check_sender = [
        p for p in pdus_to_check
        if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
    ]

    more_deferreds = keyring.verify_json_objects_for_server([
        (p.sender_domain, p.redacted_pdu_json)
        for p in pdus_to_check_sender
    ])

    for p, d in zip(pdus_to_check_sender, more_deferreds):
        p.deferreds.append(d)

    # replace lists of deferreds with single Deferreds
    return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]


def _flatten_deferred_list(deferreds):
    """Given a list of one or more deferreds, either return the single deferred, or
    combine into a DeferredList.
    """
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    else:
        assert len(deferreds) == 1
        return deferreds[0]
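
Usage of the helper just defined is straightforward (illustrative deferreds, not from the changeset):

    from twisted.internet import defer

    # a single deferred is passed through as-is
    single = _flatten_deferred_list([defer.succeed(1)])
    # several are combined into a DeferredList that fails fast on the first errback
    combined = _flatten_deferred_list([defer.succeed(1), defer.succeed(2)])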


def _is_invite_via_3pid(event):
    return (
        event.type == EventTypes.Member
        and event.membership == Membership.INVITE
        and "third_party_invite" in event.content
    )


def event_from_pdu_json(pdu_json, outlier=False):
    """Construct a FrozenEvent from an event json received over federation


@@ -271,10 +271,10 @@ class FederationClient(FederationBase):
                    event_id, destination, e,
                )
            except NotRetryingDestination as e:
                logger.info(str(e))
                logger.info(e.message)
                continue
            except FederationDeniedError as e:
                logger.info(str(e))
                logger.info(e.message)
                continue
            except Exception as e:
                pdu_attempts[destination] = now
@@ -510,7 +510,7 @@ class FederationClient(FederationBase):
            else:
                logger.warn(
                    "Failed to %s via %s: %i %s",
                    description, destination, e.code, e.args[0],
                    description, destination, e.code, e.message,
                )
        except Exception:
            logger.warn(
@@ -875,7 +875,7 @@ class FederationClient(FederationBase):
            except Exception as e:
                logger.exception(
                    "Failed to send_third_party_invite via %s: %s",
                    destination, str(e)
                    destination, e.message
                )

        raise RuntimeError("Failed to send to any server.")

@@ -99,7 +99,7 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, origin, transaction_data):
    def on_incoming_transaction(self, transaction_data):
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()
@@ -108,33 +108,34 @@ class FederationServer(FederationBase):

        if not transaction.transaction_id:
            raise Exception("Transaction missing transaction_id")
        if not transaction.origin:
            raise Exception("Transaction missing origin")

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        # use a linearizer to ensure that we don't process the same transaction
        # multiple times in parallel.
        with (yield self._transaction_linearizer.queue(
            (origin, transaction.transaction_id),
            (transaction.origin, transaction.transaction_id),
        )):
            result = yield self._handle_incoming_transaction(
                origin, transaction, request_time,
                transaction, request_time,
            )

        defer.returnValue(result)

    @defer.inlineCallbacks
    def _handle_incoming_transaction(self, origin, transaction, request_time):
    def _handle_incoming_transaction(self, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin (unicode): the server making the request
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(origin, transaction)
        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug(
@@ -148,7 +149,7 @@ class FederationServer(FederationBase):

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)
        origin_host, _ = parse_server_name(transaction.origin)

        pdus_by_room = {}

@@ -189,7 +190,7 @@ class FederationServer(FederationBase):
            event_id = pdu.event_id
            try:
                yield self._handle_received_pdu(
                    origin, pdu
                    transaction.origin, pdu
                )
                pdu_results[event_id] = {}
            except FederationError as e:
@@ -211,7 +212,7 @@ class FederationServer(FederationBase):
        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(
                    origin,
                    transaction.origin,
                    edu.edu_type,
                    edu.content
                )
@@ -223,7 +224,6 @@ class FederationServer(FederationBase):
        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(
            origin,
            transaction,
            200, response
        )
@@ -838,9 +838,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
            )

        return self._send_edu(
                edu_type=edu_type,
                origin=origin,
                content=content,
            edu_type=edu_type,
            origin=origin,
            content=content,
        )

    def on_query(self, query_type, args):
@@ -851,6 +851,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
            return handler(args)

        return self._get_query_client(
                query_type=query_type,
                args=args,
            query_type=query_type,
            args=args,
        )

@@ -36,7 +36,7 @@ class TransactionActions(object):
        self.store = datastore

    @log_function
    def have_responded(self, origin, transaction):
    def have_responded(self, transaction):
        """ Have we already responded to a transaction with the same id and
        origin?

@@ -50,11 +50,11 @@ class TransactionActions(object):
                            "transaction_id")

        return self.store.get_received_txn_response(
            transaction.transaction_id, origin
            transaction.transaction_id, transaction.origin
        )

    @log_function
    def set_response(self, origin, transaction, code, response):
    def set_response(self, transaction, code, response):
        """ Persist how we responded to a transaction.

        Returns:
@@ -66,7 +66,7 @@ class TransactionActions(object):

        return self.store.set_received_txn_response(
            transaction.transaction_id,
            origin,
            transaction.origin,
            code,
            response,
        )
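
The dedup contract behind have_responded/set_response can be sketched in-memory (illustrative; the real store persists this): responses are keyed on (origin, transaction_id), so a retried transaction is answered from the stored response rather than reprocessed.

    responses = {}

    def set_response(origin, transaction_id, code, body):
        responses[(origin, transaction_id)] = (code, body)

    def have_responded(origin, transaction_id):
        # returns None if we have never answered this transaction
        return responses.get((origin, transaction_id))

    set_response("remote.example.com", "txn1", 200, {"pdus": {}})
    assert have_responded("remote.example.com", "txn1") == (200, {"pdus": {}})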

@@ -32,7 +32,7 @@ Events are replicated via a separate events stream.
import logging
from collections import namedtuple

from six import iteritems
from six import iteritems, itervalues

from sortedcontainers import SortedDict

@@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object):

        user_ids = set(
            user_id
            for uids in self.presence_changed.values()
            for uids in itervalues(self.presence_changed)
            for user_id in uids
        )


@@ -463,19 +463,7 @@ class TransactionQueue(object):
            #  pending_transactions flag.

            pending_pdus = self.pending_pdus_by_dest.pop(destination, [])

            # We can only include at most 50 PDUs per transactions
            pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
            if leftover_pdus:
                self.pending_pdus_by_dest[destination] = leftover_pdus

            pending_edus = self.pending_edus_by_dest.pop(destination, [])

            # We can only include at most 100 EDUs per transactions
            pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
            if leftover_edus:
                self.pending_edus_by_dest[destination] = leftover_edus

            pending_presence = self.pending_presence_by_dest.pop(destination, {})

            pending_edus.extend(

@@ -15,8 +15,7 @@
# limitations under the License.

import logging

from six.moves import urllib
import urllib

from twisted.internet import defer

@@ -107,7 +106,7 @@ class TransportLayerClient(object):
            dest (str)
            room_id (str)
            event_tuples (list)
            limit (int)
            limt (int)

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
@@ -952,4 +951,4 @@ def _create_path(prefix, path, *args):
    Returns:
        str
    """
    return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
    return prefix + path % tuple(urllib.quote(arg, "") for arg in args)

@@ -90,8 +90,8 @@ class Authenticator(object):
    @defer.inlineCallbacks
    def authenticate_request(self, request, content):
        json_request = {
            "method": request.method.decode('ascii'),
            "uri": request.uri.decode('ascii'),
            "method": request.method,
            "uri": request.uri,
            "destination": self.server_name,
            "signatures": {},
        }
@@ -252,7 +252,7 @@ class BaseFederationServlet(object):
            by the callback method. None if the request has already been handled.
        """
        content = None
        if request.method in [b"PUT", b"POST"]:
        if request.method in ["PUT", "POST"]:
            # TODO: Handle other method types? other content types?
            content = parse_json_object_from_request(request)

@@ -261,10 +261,10 @@ class BaseFederationServlet(object):
        except NoAuthenticationError:
            origin = None
            if self.REQUIRE_AUTH:
                logger.warn("authenticate_request failed: missing authentication")
                logger.exception("authenticate_request failed")
                raise
        except Exception as e:
            logger.warn("authenticate_request failed: %s", e)
        except Exception:
            logger.exception("authenticate_request failed")
            raise

        if origin:
@@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet):

        try:
            code, response = yield self.handler.on_incoming_transaction(
                origin, transaction_data,
                transaction_data
            )
        except Exception:
            logger.exception("on_incoming_transaction failed")
@@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet):
        return self.handler.on_context_state_request(
            origin,
            context,
            parse_string_from_args(query, "event_id", None),
            query.get("event_id", [None])[0],
        )


@@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
        return self.handler.on_state_ids_request(
            origin,
            room_id,
            parse_string_from_args(query, "event_id", None),
            query.get("event_id", [None])[0],
        )


@@ -405,12 +405,14 @@ class FederationBackfillServlet(BaseFederationServlet):
    PATH = "/backfill/(?P<context>[^/]*)/"

    def on_GET(self, origin, content, query, context):
        versions = [x.decode('ascii') for x in query[b"v"]]
        limit = parse_integer_from_args(query, "limit", None)
        versions = query["v"]
        limits = query["limit"]

        if not limit:
        if not limits:
            return defer.succeed((400, {"error": "Did not include limit param"}))

        limit = int(limits[-1])

        return self.handler.on_backfill_request(origin, context, versions, limit)


@@ -421,7 +423,7 @@ class FederationQueryServlet(BaseFederationServlet):
    def on_GET(self, origin, content, query, query_type):
        return self.handler.on_query_request(
            query_type,
            {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()}
            {k: v[0].decode("utf-8") for k, v in query.items()}
        )


@@ -628,14 +630,14 @@ class OpenIdUserInfo(BaseFederationServlet):

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query):
        token = query.get(b"access_token", [None])[0]
        token = query.get("access_token", [None])[0]
        if token is None:
            defer.returnValue((401, {
                "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
            }))
            return

        user_id = yield self.handler.on_openid_userinfo(token.decode('ascii'))
        user_id = yield self.handler.on_openid_userinfo(token)

        if user_id is None:
            defer.returnValue((401, {

@@ -895,24 +895,22 @@ class AuthHandler(BaseHandler):

        Args:
            password (unicode): Password to hash.
            stored_hash (bytes): Expected hash value.
            stored_hash (unicode): Expected hash value.

        Returns:
            Deferred(bool): Whether self.hash(password) == stored_hash.
        """

        def _do_validate_hash():
            # Normalise the Unicode in the password
            pw = unicodedata.normalize("NFKC", password)

            return bcrypt.checkpw(
                pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
                stored_hash
                stored_hash.encode('utf8')
            )

        if stored_hash:
            if not isinstance(stored_hash, bytes):
                stored_hash = stored_hash.encode('ascii')

            return make_deferred_yieldable(
                threads.deferToThreadPool(
                    self.hs.get_reactor(),

@@ -33,6 +33,7 @@ class DeactivateAccountHandler(BaseHandler):
        self._device_handler = hs.get_device_handler()
        self._room_member_handler = hs.get_room_member_handler()
        self._identity_handler = hs.get_handlers().identity_handler
        self._profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()

        # Flag that indicates whether the process to part users from rooms is running
@@ -94,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler):

        yield self.store.user_set_password_hash(user_id, None)

        user = UserID.from_string(user_id)
        yield self._profile_handler.set_active(user, False)

        # Add the user to a table of users pending deactivation (ie.
        # removal from all the rooms they're a member of)
        yield self.store.add_user_pending_deactivation(user_id)

@@ -330,8 +330,7 @@ class E2eKeysHandler(object):
                    (algorithm, key_id, ex_json, key)
                )
            else:
                new_keys.append((
                    algorithm, key_id, encode_canonical_json(key).decode('ascii')))
                new_keys.append((algorithm, key_id, encode_canonical_json(key)))

        yield self.store.add_e2e_one_time_keys(
            user_id, device_id, time_now, new_keys
@@ -359,7 +358,7 @@ def _exception_to_failure(e):
    # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
    # give a string for e.message, which json then fails to serialize.
    return {
        "status": 503, "message": str(e),
        "status": 503, "message": str(e.message),
    }


@@ -291,9 +291,8 @@ class FederationHandler(BaseHandler):
                        ev_ids, get_prev_content=False, check_redacted=False
                    )

                room_version = yield self.store.get_room_version(pdu.room_id)
                state_map = yield resolve_events_with_factory(
                    room_version, state_groups, {pdu.event_id: pdu}, fetch
                    state_groups, {pdu.event_id: pdu}, fetch
                )

                state = (yield self.store.get_events(state_map.values())).values()
@@ -594,7 +593,7 @@ class FederationHandler(BaseHandler):

        required_auth = set(
            a_id
            for event in events + list(state_events.values()) + list(auth_events.values())
            for event in events + state_events.values() + auth_events.values()
            for a_id, _ in event.auth_events
        )
        auth_events.update({
@@ -802,7 +801,7 @@ class FederationHandler(BaseHandler):
                )
                continue
            except NotRetryingDestination as e:
                logger.info(str(e))
                logger.info(e.message)
                continue
            except FederationDeniedError as e:
                logger.info(e)
@@ -1358,7 +1357,7 @@ class FederationHandler(BaseHandler):
        )

        if state_groups:
            _, state = list(state_groups.items()).pop()
            _, state = state_groups.items().pop()
            results = state

            if event.is_state():
@@ -1829,10 +1828,7 @@ class FederationHandler(BaseHandler):
            (d.type, d.state_key): d for d in different_events if d
        })

        room_version = yield self.store.get_room_version(event.room_id)

        new_state = yield self.state_handler.resolve_events(
            room_version,
        new_state = self.state_handler.resolve_events(
            [list(local_view.values()), list(remote_view.values())],
            event
        )
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +16,9 @@

import logging

from twisted.internet import defer
from signedjson.sign import sign_json

from twisted.internet import defer, reactor

from synapse.api.errors import (
    AuthError,
@@ -26,22 +29,21 @@ from synapse.api.errors import (
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
from synapse.util.logcontext import run_in_background

from ._base import BaseHandler

logger = logging.getLogger(__name__)


class BaseProfileHandler(BaseHandler):
    """Handles fetching and updating user profile information.
class ProfileHandler(BaseHandler):
    PROFILE_UPDATE_MS = 60 * 1000
    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000

    BaseProfileHandler can be instantiated directly on workers and will
    delegate to master when necessary. The master process should use the
    subclass MasterProfileHandler
    """
    PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000

    def __init__(self, hs):
        super(BaseProfileHandler, self).__init__(hs)
        super(ProfileHandler, self).__init__(hs)

        self.federation = hs.get_federation_client()
        hs.get_federation_registry().register_query_handler(
@@ -50,6 +52,84 @@ class BaseProfileHandler(BaseHandler):

        self.user_directory_handler = hs.get_user_directory_handler()

        self.http_client = hs.get_simple_http_client()

        if hs.config.worker_app is None:
            self.clock.looping_call(
                self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
            )

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            reactor.callWhenRunning(self._assign_profile_replication_batches)
            reactor.callWhenRunning(self._replicate_profiles)
            # Add a looping call to replicate_profiles: this handles retries
            # if the replication is unsuccessful when the user updated their
            # profile.
            self.clock.looping_call(
                self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
            )

    @defer.inlineCallbacks
    def _assign_profile_replication_batches(self):
        """If no profile replication has been done yet, allocate replication batch
        numbers to each profile to start the replication process.
        """
        logger.info("Assigning profile batch numbers...")
        total = 0
        while True:
            assigned = yield self.store.assign_profile_batch()
            total += assigned
            if assigned == 0:
                break
        logger.info("Assigned %d profile batch numbers", total)

    @defer.inlineCallbacks
    def _replicate_profiles(self):
        """If any profile data has been updated and not pushed to the replication targets,
        replicate it.
        """
        host_batches = yield self.store.get_replication_hosts()
        latest_batch = yield self.store.get_latest_profile_replication_batch_number()
        if latest_batch is None:
            latest_batch = -1
        for repl_host in self.hs.config.replicate_user_profiles_to:
            if repl_host not in host_batches:
                host_batches[repl_host] = -1
            try:
                for i in xrange(host_batches[repl_host] + 1, latest_batch + 1):
                    yield self._replicate_host_profile_batch(repl_host, i)
            except Exception:
                logger.exception(
                    "Exception while replicating to %s: aborting for now", repl_host,
                )

    @defer.inlineCallbacks
    def _replicate_host_profile_batch(self, host, batchnum):
        logger.info("Replicating profile batch %d to %s", batchnum, host)
        batch_rows = yield self.store.get_profile_batch(batchnum)
        batch = {
            UserID(r["user_id"], self.hs.hostname).to_string(): ({
                "display_name": r["displayname"],
                "avatar_url": r["avatar_url"],
            } if r["active"] else None) for r in batch_rows
        }

        url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
        body = {
            "batchnum": batchnum,
            "batch": batch,
            "origin_server": self.hs.hostname,
        }
        signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
        try:
            yield self.http_client.post_json_get_json(url, signed_body)
            yield self.store.update_replication_batch_for_host(host, batchnum)
            logger.info("Sucessfully replicated profile batch %d to %s", batchnum, host)
        except Exception:
            # This will get retried when the looping call next comes around
            logger.exception("Failed to replicate profile batch %d to %s", batchnum, host)
            raise
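
The signing step above uses signedjson's sign_json; a self-contained sketch with a throwaway key (the hostname and key version here are illustrative, not from the changeset):

    from signedjson.key import generate_signing_key
    from signedjson.sign import sign_json

    signing_key = generate_signing_key("key1")  # illustrative ad-hoc key
    body = {"batchnum": 0, "batch": {}, "origin_server": "hs.example.com"}
    signed_body = sign_json(body, "hs.example.com", signing_key)
    assert "signatures" in signed_body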

    @defer.inlineCallbacks
    def get_profile(self, user_id):
        target_user = UserID.from_string(user_id)
@@ -149,19 +229,30 @@ class BaseProfileHandler(BaseHandler):

    @defer.inlineCallbacks
    def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
        """target_user is the user whose displayname is to be changed;
        auth_user is the user attempting to make this change."""
        """target_user is the UserID whose displayname is to be changed;
        requester is the authenticated user attempting to make this change."""
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this Home Server")

        if not by_admin and target_user != requester.user:
        if not by_admin and requester and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's displayname")

        if not by_admin and self.hs.config.disable_set_displayname:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            if profile.display_name:
                raise SynapseError(400, "Changing displayname is disabled on this server")

        if new_displayname == '':
            new_displayname = None

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None

        yield self.store.set_profile_displayname(
            target_user.localpart, new_displayname
            target_user.localpart, new_displayname, new_batchnum
        )

        if self.hs.config.user_directory_search_all_users:
@@ -170,7 +261,32 @@ class BaseProfileHandler(BaseHandler):
                target_user.to_string(), profile
            )

        yield self._update_join_states(requester, target_user)
        if requester:
            yield self._update_join_states(requester, target_user)

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def set_active(self, target_user, active):
        """
        Sets the 'active' flag on a user profile. If set to false, the user account is
        considered deactivated.
        Note that unlike set_displayname and set_avatar_url, this does *not* perform
        authorization checks! This is because the only place it's used currently is
        in account deactivation where we've already done these checks anyway.
        """
        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None
        yield self.store.set_profile_active(
            target_user.localpart, active, new_batchnum
        )

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def get_avatar_url(self, target_user):
@@ -214,8 +330,19 @@ class BaseProfileHandler(BaseHandler):
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's avatar_url")

        if not by_admin and self.hs.config.disable_set_avatar_url:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            if profile.avatar_url:
                raise SynapseError(400, "Changing avatar url is disabled on this server")

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None

        yield self.store.set_profile_avatar_url(
            target_user.localpart, new_avatar_url
            target_user.localpart, new_avatar_url, new_batchnum,
        )

        if self.hs.config.user_directory_search_all_users:
@@ -226,6 +353,9 @@ class BaseProfileHandler(BaseHandler):

        yield self._update_join_states(requester, target_user)

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def on_profile_query(self, args):
        user = UserID.from_string(args["user_id"])
@@ -281,20 +411,6 @@ class BaseProfileHandler(BaseHandler):
                    room_id, str(e.message)
                )


class MasterProfileHandler(BaseProfileHandler):
    PROFILE_UPDATE_MS = 60 * 1000
    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000

    def __init__(self, hs):
        super(MasterProfileHandler, self).__init__(hs)

        assert hs.config.worker_app is None

        self.clock.looping_call(
            self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
        )

    def _start_update_remote_profile_cache(self):
        return run_as_background_process(
            "Update remote profile", self._update_remote_profile_cache,
@@ -51,6 +51,7 @@ class RegistrationHandler(BaseHandler):
        self.profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()
        self.captcha_client = CaptchaServerHttpClient(hs)
        self.http_client = hs.get_simple_http_client()

        self._next_generated_user_id = None

@@ -124,8 +125,8 @@ class RegistrationHandler(BaseHandler):
            generate_token=True,
            guest_access_token=None,
            make_guest=False,
            display_name=None,
            admin=False,
            threepid=None,
    ):
        """Registers a new client on the server.

@@ -140,13 +141,14 @@ class RegistrationHandler(BaseHandler):
                since it offers no means of associating a device_id with the
                access_token. Instead you should call auth_handler.issue_access_token
                after registration.
            display_name (str): The displayname to set for this user, if any
        Returns:
            A tuple of (user_id, access_token).
        Raises:
            RegistrationError if there was a problem registering.
        """

        yield self.auth.check_auth_blocking(threepid=threepid)
        yield self.auth.check_auth_blocking()
        password_hash = None
        if password:
            password_hash = yield self.auth_handler().hash(password)
@@ -178,13 +180,20 @@ class RegistrationHandler(BaseHandler):
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                create_profile_with_localpart=(
                    # If the user was a guest then they already have a profile
                    None if was_guest else user.localpart
                ),
                admin=admin,
            )

            if display_name is None:
                display_name = (
                    # If the user was a guest then they already have a profile
                    None if was_guest else user.localpart
                )

            if display_name:
                yield self.profile_handler.set_displayname(
                    user, None, display_name, by_admin=True,
                )

            if self.hs.config.user_directory_search_all_users:
                profile = yield self.store.get_profileinfo(localpart)
                yield self.user_directory_handler.handle_local_profile_change(
@@ -209,8 +218,12 @@ class RegistrationHandler(BaseHandler):
                    token=token,
                    password_hash=password_hash,
                    make_guest=make_guest,
                    create_profile_with_localpart=user.localpart,
                )

                yield self.profile_handler.set_displayname(
                    user, None, user.localpart, by_admin=True,
                )

            except SynapseError:
                # if user id is taken, just generate another
                user = None
@@ -254,8 +267,12 @@ class RegistrationHandler(BaseHandler):
            user_id=user_id,
            password_hash="",
            appservice_id=service_id,
            create_profile_with_localpart=user.localpart,
        )

        yield self.profile_handler.set_displayname(
            user, None, user.localpart, by_admin=True,
        )

        defer.returnValue(user_id)

    @defer.inlineCallbacks
@@ -302,7 +319,10 @@ class RegistrationHandler(BaseHandler):
                user_id=user_id,
                token=token,
                password_hash=None,
                create_profile_with_localpart=user.localpart,
            )

            yield self.profile_handler.set_displayname(
                user, None, user.localpart, by_admin=True,
            )
        except Exception as e:
            yield self.store.add_access_token_to_user(user_id, token)
@@ -333,7 +353,9 @@ class RegistrationHandler(BaseHandler):
        logger.info("got threepid with medium '%s' and address '%s'",
                    threepid['medium'], threepid['address'])

        if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']):
        if not (
            yield check_3pid_allowed(self.hs, threepid['medium'], threepid['address'])
        ):
            raise RegistrationError(
                403, "Third party identifier is not allowed"
            )
@@ -375,6 +397,43 @@ class RegistrationHandler(BaseHandler):
                errcode=Codes.EXCLUSIVE
            )

    @defer.inlineCallbacks
    def chain_register(self, localpart, auth_result, params):
        """Invokes the current registration on another server, using
        shared secret registration, passing in any auth_results from
        other registration UI auth flows (e.g. validated 3pids)
        Useful for setting up shadow/backup accounts on a parallel deployment.
        """

        # TODO: retries

        chained_hs = self.hs.config.chain_register.get("hs")

        user = localpart.encode("utf-8")
        mac = hmac.new(
            key=self.hs.config.chain_register.get("hs_shared_secret").encode(),
            msg=user,
            digestmod=sha1,
        ).hexdigest()

        data = yield self.http_client.post_urlencoded_get_json(
            "https://%s%s" % (
                chained_hs, "/_matrix/client/r0/register"
            ),
            {
                # XXX: auth_result is an unspecified extension for chained registration
                'auth_result': auth_result,
                'username': localpart,
                'password': params.get("password"),
                'bind_email': params.get("bind_email"),
                'bind_msisdn': params.get("bind_msisdn"),
                'device_id': params.get("device_id"),
                'initial_device_display_name': params.get("initial_device_display_name"),
                'inhibit_login': True,
                'mac': mac,
            }
        )
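
The mac computed above is standard shared-secret registration: an HMAC-SHA1 of the localpart, keyed with the shared secret. Standalone (the secret and localpart values are illustrative):

    import hmac
    from hashlib import sha1

    secret = b"hs_shared_secret value from config"  # illustrative
    mac = hmac.new(key=secret, msg=b"alice", digestmod=sha1).hexdigest()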

    @defer.inlineCallbacks
    def _generate_user_id(self, reseed=False):
        if reseed or self._next_generated_user_id is None:
@@ -461,18 +520,15 @@ class RegistrationHandler(BaseHandler):
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                create_profile_with_localpart=user.localpart,
            )
            if displayname is not None:
                yield self.profile_handler.set_displayname(
                    user, None, displayname, by_admin=True,
                )
        else:
            yield self._auth_handler.delete_access_tokens_for_user(user_id)
            yield self.store.add_access_token_to_user(user_id=user_id, token=token)

        if displayname is not None:
            logger.info("setting user display name: %s -> %s", user_id, displayname)
            yield self.profile_handler.set_displayname(
                user, requester, displayname, by_admin=True,
            )

        defer.returnValue((user_id, token))

    def auth_handler(self):

@@ -52,12 +52,14 @@ class RoomCreationHandler(BaseHandler):
            "history_visibility": "shared",
            "original_invitees_have_ops": False,
            "guest_can_join": True,
            "encryption_alg": "m.megolm.v1.aes-sha2",
        },
        RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
            "join_rules": JoinRules.INVITE,
            "history_visibility": "shared",
            "original_invitees_have_ops": True,
            "guest_can_join": True,
            "encryption_alg": "m.megolm.v1.aes-sha2",
        },
        RoomCreationPreset.PUBLIC_CHAT: {
            "join_rules": JoinRules.PUBLIC,
@@ -425,6 +427,15 @@ class RoomCreationHandler(BaseHandler):
                content=content,
            )

        if "encryption_alg" in config:
            send(
                etype=EventTypes.Encryption,
                state_key="",
                content={
                    'algorithm': config["encryption_alg"],
                }
            )
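
For reference, the state event sent by that block comes out as an m.room.encryption event of roughly this shape (EventTypes.Encryption resolves to "m.room.encryption"; values illustrative):

    encryption_event = {
        "type": "m.room.encryption",
        "state_key": "",
        "content": {"algorithm": "m.megolm.v1.aes-sha2"},
    }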


class RoomContextHandler(object):
    def __init__(self, hs):

@@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler):
        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
            if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0
        ]

        total_room_count = len(rooms_to_scan)

@@ -344,7 +344,6 @@ class RoomMemberHandler(object):
            latest_event_ids = (
                event_id for (event_id, _, _) in prev_events_and_hashes
            )

            current_state_ids = yield self.state_handler.get_current_state_ids(
                room_id, latest_event_ids=latest_event_ids,
            )

@@ -54,7 +54,7 @@ class SearchHandler(BaseHandler):
        batch_token = None
        if batch:
            try:
                b = decode_base64(batch).decode('ascii')
                b = decode_base64(batch)
                batch_group, batch_group_key, batch_token = b.split("\n")

                assert batch_group is not None
@@ -258,18 +258,18 @@ class SearchHandler(BaseHandler):
            # it returns more from the same group (if applicable) rather
            # than reverting to searching all results again.
            if batch_group and batch_group_key:
                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                global_next_batch = encode_base64("%s\n%s\n%s" % (
                    batch_group, batch_group_key, pagination_token
                )).encode('ascii'))
                ))
            else:
                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                global_next_batch = encode_base64("%s\n%s\n%s" % (
                    "all", "", pagination_token
                )).encode('ascii'))
                ))

            for room_id, group in room_groups.items():
                group["next_batch"] = encode_base64(("%s\n%s\n%s" % (
                group["next_batch"] = encode_base64("%s\n%s\n%s" % (
                    "room_id", room_id, pagination_token
                )).encode('ascii'))
                ))

            allowed_events.extend(room_events)
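
The batch token round-trip in this hunk is a newline-joined triple run through base64; a sketch with the stdlib codec standing in for synapse's encode_base64/decode_base64 (values illustrative):

    from base64 import b64decode, b64encode

    token = b64encode(b"room_id\n!room:example.com\npagination_token")
    group, key, pagination = b64decode(token).split(b"\n")
    assert group == b"room_id"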


@@ -24,7 +24,6 @@ from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.roommember import MemberSummary
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -526,8 +525,6 @@ class SyncHandler(object):
            A deferred dict describing the room summary
        """

        # FIXME: we could/should get this from room_stats when matthew/stats lands

        # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305
        last_events, _ = yield self.store.get_recent_event_ids_for_room(
            room_id, end_token=now_token.room_key, limit=1,
@@ -540,67 +537,44 @@ class SyncHandler(object):
        last_event = last_events[-1]
        state_ids = yield self.store.get_state_ids_for_event(
            last_event.event_id, [
                (EventTypes.Member, None),
                (EventTypes.Name, ''),
                (EventTypes.CanonicalAlias, ''),
            ]
        )

        # this is heavily cached, thus: fast.
        details = yield self.store.get_room_summary(room_id)

        member_ids = {
            state_key: event_id
            for (t, state_key), event_id in state_ids.iteritems()
            if t == EventTypes.Member
        }
        name_id = state_ids.get((EventTypes.Name, ''))
        canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ''))

        summary = {}
        empty_ms = MemberSummary([], 0)

        # FIXME: it feels very heavy to load up every single membership event
        # just to calculate the counts.
        member_events = yield self.store.get_events(member_ids.values())

        joined_user_ids = []
        invited_user_ids = []

        for ev in member_events.values():
            if ev.content.get("membership") == Membership.JOIN:
                joined_user_ids.append(ev.state_key)
            elif ev.content.get("membership") == Membership.INVITE:
                invited_user_ids.append(ev.state_key)

        # TODO: only send these when they change.
        summary["m.joined_member_count"] = (
            details.get(Membership.JOIN, empty_ms).count
        )
        summary["m.invited_member_count"] = (
            details.get(Membership.INVITE, empty_ms).count
        )
        summary["m.joined_member_count"] = len(joined_user_ids)
        summary["m.invited_member_count"] = len(invited_user_ids)

        # if the room has a name or canonical_alias set, we can skip
        # calculating heroes. we assume that if the event has contents, it'll
        # be a valid name or canonical_alias - i.e. we're checking that they
        # haven't been "deleted" by blatting {} over the top.
        if name_id:
            name = yield self.store.get_event(name_id, allow_none=False)
            if name and name.content:
                defer.returnValue(summary)
        if name_id or canonical_alias_id:
            defer.returnValue(summary)

        if canonical_alias_id:
            canonical_alias = yield self.store.get_event(
                canonical_alias_id, allow_none=False,
            )
            if canonical_alias and canonical_alias.content:
                defer.returnValue(summary)
        # FIXME: order by stream ordering, not alphabetic

        joined_user_ids = [
            r[0] for r in details.get(Membership.JOIN, empty_ms).members
        ]
        invited_user_ids = [
            r[0] for r in details.get(Membership.INVITE, empty_ms).members
        ]
        gone_user_ids = (
            [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
            [r[0] for r in details.get(Membership.BAN, empty_ms).members]
        )

        # FIXME: only build up a member_ids list for our heroes
        member_ids = {}
        for membership in (
            Membership.JOIN,
            Membership.INVITE,
            Membership.LEAVE,
            Membership.BAN
        ):
            for user_id, event_id in details.get(membership, empty_ms).members:
                member_ids[user_id] = event_id

        # FIXME: order by stream ordering rather than as returned by SQL
        me = sync_config.user.to_string()
        if (joined_user_ids or invited_user_ids):
            summary['m.heroes'] = sorted(
@@ -612,11 +586,7 @@ class SyncHandler(object):
            )[0:5]
        else:
            summary['m.heroes'] = sorted(
                [
                    user_id
                    for user_id in gone_user_ids
                    if user_id != me
                ]
                [user_id for user_id in member_ids.keys() if user_id != me]
            )[0:5]
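
Hero selection on both sides of this hunk reduces to the same shape: sort the candidate user IDs, drop the syncing user, keep at most five. Illustratively:

    me = "@me:example.com"
    member_ids = {"@a:example.com": "$e1", "@me:example.com": "$e2", "@b:example.com": "$e3"}
    heroes = sorted(uid for uid in member_ids if uid != me)[0:5]
    assert heroes == ["@a:example.com", "@b:example.com"]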
|
||||
|
||||
if not sync_config.filter_collection.lazy_load_members():
|
||||
@@ -749,26 +719,6 @@ class SyncHandler(object):
|
||||
lazy_load_members=lazy_load_members,
|
||||
)
|
||||
elif batch.limited:
|
||||
state_at_timeline_start = yield self.store.get_state_ids_for_event(
|
||||
batch.events[0].event_id, types=types,
|
||||
filtered_types=filtered_types,
|
||||
)
|
||||
|
||||
# for now, we disable LL for gappy syncs - see
|
||||
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
|
||||
# N.B. this slows down incr syncs as we are now processing way
|
||||
# more state in the server than if we were LLing.
|
||||
#
|
||||
# We still have to filter timeline_start to LL entries (above) in order
|
||||
# for _calculate_state's LL logic to work, as we have to include LL
|
||||
# members for timeline senders in case they weren't loaded in the initial
|
||||
# sync. We do this by (counterintuitively) by filtering timeline_start
|
||||
# members to just be ones which were timeline senders, which then ensures
|
||||
# all of the rest get included in the state block (if we need to know
|
||||
# about them).
|
||||
types = None
|
||||
filtered_types = None
|
||||
|
||||
state_at_previous_sync = yield self.get_state_at(
|
||||
room_id, stream_position=since_token, types=types,
|
||||
filtered_types=filtered_types,
|
||||
@@ -779,29 +729,25 @@ class SyncHandler(object):
|
||||
filtered_types=filtered_types,
|
||||
)
|
||||
|
||||
state_at_timeline_start = yield self.store.get_state_ids_for_event(
|
||||
batch.events[0].event_id, types=types,
|
||||
filtered_types=filtered_types,
|
||||
)
|
||||
|
||||
state_ids = _calculate_state(
|
||||
timeline_contains=timeline_state,
|
||||
timeline_start=state_at_timeline_start,
|
||||
previous=state_at_previous_sync,
|
||||
current=current_state_ids,
|
||||
# we have to include LL members in case LL initial sync missed them
|
||||
lazy_load_members=lazy_load_members,
|
||||
)
|
||||
else:
|
||||
state_ids = {}
|
||||
if lazy_load_members:
|
||||
if types:
|
||||
# We're returning an incremental sync, with no
|
||||
# "gap" since the previous sync, so normally there would be
|
||||
# no state to return.
|
||||
# But we're lazy-loading, so the client might need some more
|
||||
# member events to understand the events in this timeline.
|
||||
# So we fish out all the member events corresponding to the
|
||||
# timeline here, and then dedupe any redundant ones below.
|
||||
|
||||
state_ids = yield self.store.get_state_ids_for_event(
|
||||
batch.events[0].event_id, types=types,
|
||||
filtered_types=None, # we only want members!
|
||||
filtered_types=filtered_types,
|
||||
)
|
||||
|
||||
if lazy_load_members and not include_redundant_members:
|
||||
@@ -821,7 +767,7 @@ class SyncHandler(object):
|
||||
logger.debug("filtering state from %r...", state_ids)
|
||||
state_ids = {
|
||||
t: event_id
|
||||
for t, event_id in iteritems(state_ids)
|
||||
for t, event_id in state_ids.iteritems()
|
||||
if cache.get(t[1]) != event_id
|
||||
}
|
||||
logger.debug("...to %r", state_ids)
|
||||
@@ -908,7 +854,7 @@ class SyncHandler(object):
res = yield self._generate_sync_entry_for_rooms(
sync_result_builder, account_data_by_room
)
newly_joined_rooms, newly_joined_users, _, _ = res
newly_joined_rooms, newly_joined_or_invited_users, _, _ = res
_, _, newly_left_rooms, newly_left_users = res

block_all_presence_data = (
@@ -917,7 +863,7 @@ class SyncHandler(object):
)
if self.hs_config.use_presence and not block_all_presence_data:
yield self._generate_sync_entry_for_presence(
sync_result_builder, newly_joined_rooms, newly_joined_users
sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
)

yield self._generate_sync_entry_for_to_device(sync_result_builder)
@@ -925,7 +871,7 @@ class SyncHandler(object):
device_lists = yield self._generate_sync_entry_for_device_list(
sync_result_builder,
newly_joined_rooms=newly_joined_rooms,
newly_joined_users=newly_joined_users,
newly_joined_or_invited_users=newly_joined_or_invited_users,
newly_left_rooms=newly_left_rooms,
newly_left_users=newly_left_users,
)
@@ -1001,7 +947,8 @@ class SyncHandler(object):
@measure_func("_generate_sync_entry_for_device_list")
@defer.inlineCallbacks
def _generate_sync_entry_for_device_list(self, sync_result_builder,
newly_joined_rooms, newly_joined_users,
newly_joined_rooms,
newly_joined_or_invited_users,
newly_left_rooms, newly_left_users):
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
@@ -1015,7 +962,7 @@ class SyncHandler(object):
# share a room with?
for room_id in newly_joined_rooms:
joined_users = yield self.state.get_current_user_in_room(room_id)
newly_joined_users.update(joined_users)
newly_joined_or_invited_users.update(joined_users)

for room_id in newly_left_rooms:
left_users = yield self.state.get_current_user_in_room(room_id)
@@ -1023,7 +970,7 @@ class SyncHandler(object):

# TODO: Check that these users are actually new, i.e. either they
# weren't in the previous sync *or* they left and rejoined.
changed.update(newly_joined_users)
changed.update(newly_joined_or_invited_users)

if not changed and not newly_left_users:
defer.returnValue(DeviceLists(
@@ -1141,7 +1088,7 @@ class SyncHandler(object):

@defer.inlineCallbacks
def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
newly_joined_users):
newly_joined_or_invited_users):
"""Generates the presence portion of the sync response. Populates the
`sync_result_builder` with the result.

@@ -1149,8 +1096,9 @@ class SyncHandler(object):
sync_result_builder(SyncResultBuilder)
newly_joined_rooms(list): List of rooms that the user has joined
since the last sync (or empty if an initial sync)
newly_joined_users(list): List of users that have joined rooms
since the last sync (or empty if an initial sync)
newly_joined_or_invited_users(list): List of users that have joined
or been invited to rooms since the last sync (or empty if an initial
sync)
"""
now_token = sync_result_builder.now_token
sync_config = sync_result_builder.sync_config
@@ -1176,7 +1124,7 @@ class SyncHandler(object):
"presence_key", presence_key
)

extra_users_ids = set(newly_joined_users)
extra_users_ids = set(newly_joined_or_invited_users)
for room_id in newly_joined_rooms:
users = yield self.state.get_current_user_in_room(room_id)
extra_users_ids.update(users)
@@ -1208,7 +1156,8 @@ class SyncHandler(object):

Returns:
Deferred(tuple): Returns a 4-tuple of
`(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)`
`(newly_joined_rooms, newly_joined_or_invited_users,
newly_left_rooms, newly_left_users)`
"""
user_id = sync_result_builder.sync_config.user.to_string()
block_all_room_ephemeral = (
@@ -1279,8 +1228,8 @@ class SyncHandler(object):

sync_result_builder.invited.extend(invited)

# Now we want to get any newly joined users
newly_joined_users = set()
# Now we want to get any newly joined or invited users
newly_joined_or_invited_users = set()
newly_left_users = set()
if since_token:
for joined_sync in sync_result_builder.joined:
@@ -1289,19 +1238,22 @@ class SyncHandler(object):
)
for event in it:
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
newly_joined_users.add(event.state_key)
if (
event.membership == Membership.JOIN or
event.membership == Membership.INVITE
):
newly_joined_or_invited_users.add(event.state_key)
else:
prev_content = event.unsigned.get("prev_content", {})
prev_membership = prev_content.get("membership", None)
if prev_membership == Membership.JOIN:
newly_left_users.add(event.state_key)

newly_left_users -= newly_joined_users
newly_left_users -= newly_joined_or_invited_users

defer.returnValue((
newly_joined_rooms,
newly_joined_users,
newly_joined_or_invited_users,
newly_left_rooms,
newly_left_users,
))
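Editor's note: the hunk above widens "newly joined" to "newly joined or invited". A self-contained sketch of the classification rule, using plain dicts in place of Synapse's event objects (hypothetical data shapes):

    JOIN, INVITE = "join", "invite"

    def classify_membership(member_events):
        newly_joined_or_invited, newly_left = set(), set()
        for ev in member_events:
            if ev["membership"] in (JOIN, INVITE):
                newly_joined_or_invited.add(ev["state_key"])
            elif ev.get("prev_membership") == JOIN:
                newly_left.add(ev["state_key"])
        # a user who rejoined (or was re-invited) later is not "newly left"
        newly_left -= newly_joined_or_invited
        return newly_joined_or_invited, newly_left
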
@@ -1346,7 +1298,7 @@ class SyncHandler(object):
where:
room_entries is a list [RoomSyncResultBuilder]
invited_rooms is a list [InvitedSyncResult]
newly_joined rooms is a list[str] of room ids
newly_joined_rooms is a list[str] of room ids
newly_left_rooms is a list[str] of room ids
"""
user_id = sync_result_builder.sync_config.user.to_string()
@@ -1381,7 +1333,7 @@ class SyncHandler(object):
if room_id in sync_result_builder.joined_room_ids and non_joins:
# Always include if the user (re)joined the room, especially
# important so that device list changes are calculated correctly.
# If there are non join member events, but we are still in the room,
# If there are non-join member events, but we are still in the room,
# then the user must have left and joined
newly_joined_rooms.append(room_id)

@@ -1622,19 +1574,6 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)

# When we join the room (or the client requests full_state), we should
# send down any existing tags. Usually the user won't have tags in a
# newly joined room, unless either a) they've joined before or b) the
# tag was added by synapse e.g. for server notice rooms.
if full_state:
user_id = sync_result_builder.sync_config.user.to_string()
tags = yield self.store.get_tags_for_room(user_id, room_id)

# If there aren't any tags, don't send the empty tags list down
# sync
if not tags:
tags = None

account_data_events = []
if tags is not None:
account_data_events.append({
@@ -1663,24 +1602,10 @@ class SyncHandler(object):
)

summary = {}

# we include a summary in room responses when we're lazy loading
# members (as the client otherwise doesn't have enough info to form
# the name itself).
if (
sync_config.filter_collection.lazy_load_members() and
(
# we recalculate the summary:
# if there are membership changes in the timeline, or
# if membership has changed during a gappy sync, or
# if this is an initial sync.
any(ev.type == EventTypes.Member for ev in batch.events) or
(
# XXX: this may include false positives in the form of LL
# members which have snuck into state
batch.limited and
any(t == EventTypes.Member for (t, k) in state)
) or
since_token is None
)
):
@@ -1710,16 +1635,6 @@ class SyncHandler(object):
unread_notifications["highlight_count"] = notifs["highlight_count"]

sync_result_builder.joined.append(room_sync)

if batch.limited and since_token:
user_id = sync_result_builder.sync_config.user.to_string()
logger.info(
"Incremental gappy sync of %s for user %s with %d state events" % (
room_id,
user_id,
len(state),
)
)
elif room_builder.rtype == "archived":
room_sync = ArchivedSyncResult(
room_id=room_id,
@@ -1813,17 +1728,17 @@ def _calculate_state(
event_id_to_key = {
e: key
for key, e in itertools.chain(
iteritems(timeline_contains),
iteritems(previous),
iteritems(timeline_start),
iteritems(current),
timeline_contains.items(),
previous.items(),
timeline_start.items(),
current.items(),
)
}

c_ids = set(e for e in itervalues(current))
ts_ids = set(e for e in itervalues(timeline_start))
p_ids = set(e for e in itervalues(previous))
tc_ids = set(e for e in itervalues(timeline_contains))
c_ids = set(e for e in current.values())
ts_ids = set(e for e in timeline_start.values())
p_ids = set(e for e in previous.values())
tc_ids = set(e for e in timeline_contains.values())

# If we are lazyloading room members, we explicitly add the membership events
# for the senders in the timeline into the state block returned by /sync,
@@ -1837,7 +1752,7 @@ def _calculate_state(

if lazy_load_members:
p_ids.difference_update(
e for t, e in iteritems(timeline_start)
e for t, e in timeline_start.iteritems()
if t[0] == EventTypes.Member
)

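Editor's note: much of the churn in this file swaps py2-only dict methods (`d.iteritems()`, `d.itervalues()`) for the portable `six` helpers. The two portable spellings, for reference:

    from six import iteritems, itervalues

    d = {("m.room.member", "@alice:example.org"): "$event_id"}
    for key, value in iteritems(d):   # works on py2 and py3; lazy on py2
        pass
    event_ids = set(itervalues(d))    # likewise, instead of d.itervalues()
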
@@ -119,8 +119,6 @@ class UserDirectoryHandler(object):
"""Called to update index of our local user profiles when they change
irrespective of any rooms the user may be in.
"""
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
yield self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url, None,
)
@@ -129,8 +127,6 @@ class UserDirectoryHandler(object):
def handle_user_deactivated(self, user_id):
"""Called when a user ID is deactivated
"""
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
yield self.store.remove_from_user_dir(user_id)
yield self.store.remove_from_user_in_public_room(user_id)

@@ -13,25 +13,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import urllib

from six import text_type
from six.moves import urllib
from six import StringIO

import treq
from canonicaljson import encode_canonical_json, json
from prometheus_client import Counter

from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE
from twisted.internet import defer, protocol, reactor, ssl
from twisted.internet import defer, protocol, reactor, ssl, task
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web._newclient import ResponseDone
from twisted.web.client import (
Agent,
BrowserLikeRedirectAgent,
ContentDecoderAgent,
FileBodyProducer as TwistedFileBodyProducer,
GzipDecoder,
HTTPConnectionPool,
PartialDownloadError,
@@ -84,20 +83,18 @@ class SimpleHttpClient(object):
if hs.config.user_agent_suffix:
self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)

self.user_agent = self.user_agent.encode('ascii')

@defer.inlineCallbacks
def request(self, method, uri, data=b'', headers=None):
def request(self, method, uri, *args, **kwargs):
# A small wrapper around self.agent.request() so we can easily attach
# counters to it
outgoing_requests_counter.labels(method).inc()

# log request but strip `access_token` (AS requests for example include this)
logger.info("Sending request %s %s", method, redact_uri(uri.encode('ascii')))
logger.info("Sending request %s %s", method, redact_uri(uri))

try:
request_deferred = treq.request(
method, uri, agent=self.agent, data=data, headers=headers
request_deferred = self.agent.request(
method, uri, *args, **kwargs
)
add_timeout_to_deferred(
request_deferred, 60, self.hs.get_reactor(),
@@ -108,14 +105,14 @@ class SimpleHttpClient(object):
incoming_responses_counter.labels(method, response.code).inc()
logger.info(
"Received response to %s %s: %s",
method, redact_uri(uri.encode('ascii')), response.code
method, redact_uri(uri), response.code
)
defer.returnValue(response)
except Exception as e:
incoming_responses_counter.labels(method, "ERR").inc()
logger.info(
"Error sending request to %s %s: %s %s",
method, redact_uri(uri.encode('ascii')), type(e).__name__, e.args[0]
method, redact_uri(uri), type(e).__name__, e.message
)
raise

@@ -140,8 +137,7 @@ class SimpleHttpClient(object):
# TODO: Do we ever want to log message contents?
logger.debug("post_urlencoded_get_json args: %s", args)

query_bytes = urllib.parse.urlencode(
encode_urlencode_args(args), True).encode("utf8")
query_bytes = urllib.urlencode(encode_urlencode_args(args), True)

actual_headers = {
b"Content-Type": [b"application/x-www-form-urlencoded"],
@@ -152,14 +148,15 @@ class SimpleHttpClient(object):

response = yield self.request(
"POST",
uri,
uri.encode("ascii"),
headers=Headers(actual_headers),
data=query_bytes
bodyProducer=FileBodyProducer(StringIO(query_bytes))
)

body = yield make_deferred_yieldable(readBody(response))

if 200 <= response.code < 300:
body = yield make_deferred_yieldable(treq.json_content(response))
defer.returnValue(body)
defer.returnValue(json.loads(body))
else:
raise HttpResponseException(response.code, response.phrase, body)

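Editor's note: one side of this hunk routes the POST through treq (passing `data=` and letting treq build the body producer), the other hands a FileBodyProducer to the raw Agent. A hedged sketch of the treq form of a urlencoded POST (URL and payload are hypothetical):

    import treq
    from twisted.internet import defer

    @defer.inlineCallbacks
    def post_form(url):
        response = yield treq.post(
            url,
            data=b"key=value",  # pre-encoded x-www-form-urlencoded body
            headers={b"Content-Type": [b"application/x-www-form-urlencoded"]},
        )
        body = yield treq.json_content(response)  # parse the JSON response
        defer.returnValue(body)
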
@@ -194,9 +191,9 @@ class SimpleHttpClient(object):

response = yield self.request(
"POST",
uri,
uri.encode("ascii"),
headers=Headers(actual_headers),
data=json_str
bodyProducer=FileBodyProducer(StringIO(json_str))
)

body = yield make_deferred_yieldable(readBody(response))
@@ -251,7 +248,7 @@ class SimpleHttpClient(object):
ValueError: if the response was not JSON
"""
if len(args):
query_bytes = urllib.parse.urlencode(args, True)
query_bytes = urllib.urlencode(args, True)
uri = "%s?%s" % (uri, query_bytes)

json_str = encode_canonical_json(json_body)
@@ -265,9 +262,9 @@ class SimpleHttpClient(object):

response = yield self.request(
"PUT",
uri,
uri.encode("ascii"),
headers=Headers(actual_headers),
data=json_str
bodyProducer=FileBodyProducer(StringIO(json_str))
)

body = yield make_deferred_yieldable(readBody(response))
@@ -296,7 +293,7 @@ class SimpleHttpClient(object):
HttpResponseException on a non-2xx HTTP response.
"""
if len(args):
query_bytes = urllib.parse.urlencode(args, True)
query_bytes = urllib.urlencode(args, True)
uri = "%s?%s" % (uri, query_bytes)

actual_headers = {
@@ -307,7 +304,7 @@ class SimpleHttpClient(object):

response = yield self.request(
"GET",
uri,
uri.encode("ascii"),
headers=Headers(actual_headers),
)

@@ -342,7 +339,7 @@ class SimpleHttpClient(object):

response = yield self.request(
"GET",
url,
url.encode("ascii"),
headers=Headers(actual_headers),
)

@@ -437,12 +434,12 @@ class CaptchaServerHttpClient(SimpleHttpClient):

@defer.inlineCallbacks
def post_urlencoded_get_raw(self, url, args={}):
query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True)
query_bytes = urllib.urlencode(encode_urlencode_args(args), True)

response = yield self.request(
"POST",
url,
data=query_bytes,
url.encode("ascii"),
bodyProducer=FileBodyProducer(StringIO(query_bytes)),
headers=Headers({
b"Content-Type": [b"application/x-www-form-urlencoded"],
b"User-Agent": [self.user_agent],
@@ -513,7 +510,7 @@ def encode_urlencode_args(args):


def encode_urlencode_arg(arg):
if isinstance(arg, text_type):
if isinstance(arg, unicode):
return arg.encode('utf-8')
elif isinstance(arg, list):
return [encode_urlencode_arg(i) for i in arg]
@@ -545,3 +542,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):

def creatorForNetloc(self, hostname, port):
return self


class FileBodyProducer(TwistedFileBodyProducer):
"""Workaround for https://twistedmatrix.com/trac/ticket/8473

We override the pauseProducing and resumeProducing methods in twisted's
FileBodyProducer so that they do not raise exceptions if the task has
already completed.
"""

def pauseProducing(self):
try:
super(FileBodyProducer, self).pauseProducing()
except task.TaskDone:
# task has already completed
pass

def resumeProducing(self):
try:
super(FileBodyProducer, self).resumeProducing()
except task.NotPaused:
# task was not paused (probably because it had already completed)
pass

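Editor's note: the FileBodyProducer subclass above only changes error handling; construction and use match twisted's own producer. A short usage sketch (payload hypothetical):

    from six import StringIO

    producer = FileBodyProducer(StringIO("key=value"))
    # pass as the bodyProducer argument of Agent.request(); late pause/resume
    # calls after the body has finished are now swallowed rather than raised
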
@@ -17,19 +17,19 @@ import cgi
import logging
import random
import sys
import urllib

from six import PY3, string_types
from six.moves import urllib
from six import string_types
from six.moves.urllib import parse as urlparse

import treq
from canonicaljson import encode_canonical_json
from canonicaljson import encode_canonical_json, json
from prometheus_client import Counter
from signedjson.sign import sign_json

from twisted.internet import defer, protocol, reactor
from twisted.internet.error import DNSLookupError
from twisted.web._newclient import ResponseDone
from twisted.web.client import Agent, HTTPConnectionPool
from twisted.web.client import Agent, HTTPConnectionPool, readBody
from twisted.web.http_headers import Headers

import synapse.metrics
@@ -58,18 +58,13 @@ incoming_responses_counter = Counter("synapse_http_matrixfederationclient_respon
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3

if PY3:
MAXINT = sys.maxsize
else:
MAXINT = sys.maxint


class MatrixFederationEndpointFactory(object):
def __init__(self, hs):
self.tls_client_options_factory = hs.tls_client_options_factory

def endpointForURI(self, uri):
destination = uri.netloc.decode('ascii')
destination = uri.netloc

return matrix_federation_endpoint(
reactor, destination, timeout=10,
@@ -98,32 +93,26 @@ class MatrixFederationHttpClient(object):
)
self.clock = hs.get_clock()
self._store = hs.get_datastore()
self.version_string = hs.version_string.encode('ascii')
self.version_string = hs.version_string
self._next_id = 1

def _create_url(self, destination, path_bytes, param_bytes, query_bytes):
return urllib.parse.urlunparse(
(b"matrix", destination, path_bytes, param_bytes, query_bytes, b"")
return urlparse.urlunparse(
("matrix", destination, path_bytes, param_bytes, query_bytes, "")
)

@defer.inlineCallbacks
def _request(self, destination, method, path,
json=None, json_callback=None,
param_bytes=b"",
query=None, retry_on_dns_fail=True,
body_callback, headers_dict={}, param_bytes=b"",
query_bytes=b"", retry_on_dns_fail=True,
timeout=None, long_retries=False,
ignore_backoff=False,
backoff_on_404=False):
"""
Creates and sends a request to the given server.

""" Creates and sends a request to the given server
Args:
destination (str): The remote server to send the HTTP request to.
method (str): HTTP method
path (str): The HTTP path
json (dict or None): JSON to send in the body.
json_callback (func or None): A callback to generate the JSON.
query (dict or None): Query arguments.
ignore_backoff (bool): true to ignore the historical backoff data
and try the request anyway.
backoff_on_404 (bool): Back off if we get a 404
@@ -144,7 +133,7 @@ class MatrixFederationHttpClient(object):
failures, connection failures, SSL failures.)
"""
if (
self.hs.config.federation_domain_whitelist is not None and
self.hs.config.federation_domain_whitelist and
destination not in self.hs.config.federation_domain_whitelist
):
raise FederationDeniedError(destination)
@@ -157,25 +146,23 @@ class MatrixFederationHttpClient(object):
ignore_backoff=ignore_backoff,
)

headers_dict = {}
destination = destination.encode("ascii")
path_bytes = path.encode("ascii")
if query:
query_bytes = encode_query_args(query)
else:
query_bytes = b""

headers_dict = {
"User-Agent": [self.version_string],
"Host": [destination],
}

with limiter:
url = self._create_url(
destination.encode("ascii"), path_bytes, param_bytes, query_bytes
).decode('ascii')
headers_dict[b"User-Agent"] = [self.version_string]
headers_dict[b"Host"] = [destination]

url_bytes = self._create_url(
destination, path_bytes, param_bytes, query_bytes
)

txn_id = "%s-O-%s" % (method, self._next_id)
self._next_id = (self._next_id + 1) % (MAXINT - 1)
self._next_id = (self._next_id + 1) % (sys.maxint - 1)

outbound_logger.info(
"{%s} [%s] Sending request: %s %s",
txn_id, destination, method, url_bytes
)

# XXX: Would be much nicer to retry only at the transaction-layer
# (once we have reliable transactions in place)
@@ -184,94 +171,80 @@ class MatrixFederationHttpClient(object):
else:
retries_left = MAX_SHORT_RETRIES

http_url = urllib.parse.urlunparse(
(b"", b"", path_bytes, param_bytes, query_bytes, b"")
).decode('ascii')
http_url_bytes = urlparse.urlunparse(
("", "", path_bytes, param_bytes, query_bytes, "")
)

log_result = None
while True:
try:
if json_callback:
json = json_callback()
try:
while True:
producer = None
if body_callback:
producer = body_callback(method, http_url_bytes, headers_dict)

if json:
data = encode_canonical_json(json)
headers_dict["Content-Type"] = ["application/json"]
self.sign_request(
destination, method, http_url, headers_dict, json
try:
request_deferred = self.agent.request(
method,
url_bytes,
Headers(headers_dict),
producer
)
add_timeout_to_deferred(
request_deferred,
timeout / 1000. if timeout else 60,
self.hs.get_reactor(),
cancelled_to_request_timed_out_error,
)
response = yield make_deferred_yieldable(
request_deferred,
)
else:
data = None
self.sign_request(destination, method, http_url, headers_dict)

outbound_logger.info(
"{%s} [%s] Sending request: %s %s",
txn_id, destination, method, url
)
log_result = "%d %s" % (response.code, response.phrase,)
break
except Exception as e:
if not retry_on_dns_fail and isinstance(e, DNSLookupError):
logger.warn(
"DNS Lookup failed to %s with %s",
destination,
e
)
log_result = "DNS Lookup failed to %s with %s" % (
destination, e
)
raise

request_deferred = treq.request(
method,
url,
headers=Headers(headers_dict),
data=data,
agent=self.agent,
)
add_timeout_to_deferred(
request_deferred,
timeout / 1000. if timeout else 60,
self.hs.get_reactor(),
cancelled_to_request_timed_out_error,
)
response = yield make_deferred_yieldable(
request_deferred,
)

log_result = "%d %s" % (response.code, response.phrase,)
break
except Exception as e:
if not retry_on_dns_fail and isinstance(e, DNSLookupError):
logger.warn(
"DNS Lookup failed to %s with %s",
"{%s} Sending request failed to %s: %s %s: %s",
txn_id,
destination,
e
method,
url_bytes,
_flatten_response_never_received(e),
)
log_result = "DNS Lookup failed to %s with %s" % (
destination, e
)
raise

logger.warn(
"{%s} Sending request failed to %s: %s %s: %s",
txn_id,
destination,
method,
url,
_flatten_response_never_received(e),
)
log_result = _flatten_response_never_received(e)

log_result = _flatten_response_never_received(e)
if retries_left and not timeout:
if long_retries:
delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
delay = min(delay, 60)
delay *= random.uniform(0.8, 1.4)
else:
delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
delay = min(delay, 2)
delay *= random.uniform(0.8, 1.4)

if retries_left and not timeout:
if long_retries:
delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
delay = min(delay, 60)
delay *= random.uniform(0.8, 1.4)
yield self.clock.sleep(delay)
retries_left -= 1
else:
delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
delay = min(delay, 2)
delay *= random.uniform(0.8, 1.4)

yield self.clock.sleep(delay)
retries_left -= 1
else:
raise
finally:
outbound_logger.info(
"{%s} [%s] Result: %s",
txn_id,
destination,
log_result,
)
raise
finally:
outbound_logger.info(
"{%s} [%s] Result: %s",
txn_id,
destination,
log_result,
)

if 200 <= response.code < 300:
pass
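Editor's note: both versions of the retry loop share the same backoff schedule. Pulled out as a standalone sketch (constants as defined earlier in this file):

    import random

    MAX_LONG_RETRIES = 10
    MAX_SHORT_RETRIES = 3

    def retry_delay(retries_left, long_retries):
        # long retries: 4s, 16s, then capped at 60s; short: 0.5s, 1s, capped at 2s
        if long_retries:
            delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
        else:
            delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
        return delay * random.uniform(0.8, 1.4)  # jitter to spread retries out
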
@@ -279,10 +252,7 @@ class MatrixFederationHttpClient(object):
# :'(
# Update transactions table?
with logcontext.PreserveLoggingContext():
body = yield self._timeout_deferred(
treq.content(response),
timeout,
)
body = yield readBody(response)
raise HttpResponseException(
response.code, response.phrase, body
)
@@ -327,11 +297,11 @@ class MatrixFederationHttpClient(object):
auth_headers = []

for key, sig in request["signatures"][self.server_name].items():
auth_headers.append((
auth_headers.append(bytes(
"X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
self.server_name, key, sig,
)).encode('ascii')
)
)
))

headers_dict[b"Authorization"] = auth_headers

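Editor's note: a hedged sketch of how those `X-Matrix` Authorization headers are derived: the request envelope is signed with `signedjson` and one header is emitted per (key id, signature) pair. The envelope fields here are simplified for illustration:

    from signedjson.sign import sign_json  # imported at the top of this file

    def build_auth_headers(origin, signing_key, method, uri):
        request = {"method": method, "uri": uri, "origin": origin}
        request = sign_json(request, origin, signing_key)
        return [
            'X-Matrix origin=%s,key="%s",sig="%s"' % (origin, key_id, sig)
            for key_id, sig in request["signatures"][origin].items()
        ]
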
@@ -377,14 +347,24 @@ class MatrixFederationHttpClient(object):
"""

if not json_data_callback:
json_data_callback = lambda: data
def json_data_callback():
return data

def body_callback(method, url_bytes, headers_dict):
json_data = json_data_callback()
self.sign_request(
destination, method, url_bytes, headers_dict, json_data
)
producer = _JsonProducer(json_data)
return producer

response = yield self._request(
destination,
"PUT",
path,
json_callback=json_data_callback,
query=args,
body_callback=body_callback,
headers_dict={"Content-Type": ["application/json"]},
query_bytes=encode_query_args(args),
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
@@ -396,11 +376,8 @@ class MatrixFederationHttpClient(object):
check_content_type_is_json(response.headers)

with logcontext.PreserveLoggingContext():
body = yield self._timeout_deferred(
treq.json_content(response),
timeout,
)
defer.returnValue(body)
body = yield readBody(response)
defer.returnValue(json.loads(body))

@defer.inlineCallbacks
def post_json(self, destination, path, data={}, long_retries=False,
@@ -433,12 +410,20 @@ class MatrixFederationHttpClient(object):
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
"""

def body_callback(method, url_bytes, headers_dict):
self.sign_request(
destination, method, url_bytes, headers_dict, data
)
return _JsonProducer(data)

response = yield self._request(
destination,
"POST",
path,
query=args,
json=data,
query_bytes=encode_query_args(args),
body_callback=body_callback,
headers_dict={"Content-Type": ["application/json"]},
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
@@ -449,12 +434,9 @@ class MatrixFederationHttpClient(object):
check_content_type_is_json(response.headers)

with logcontext.PreserveLoggingContext():
body = yield self._timeout_deferred(
treq.json_content(response),
timeout,
)
body = yield readBody(response)

defer.returnValue(body)
defer.returnValue(json.loads(body))

@defer.inlineCallbacks
def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
@@ -489,11 +471,16 @@ class MatrixFederationHttpClient(object):

logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)

def body_callback(method, url_bytes, headers_dict):
self.sign_request(destination, method, url_bytes, headers_dict)
return None

response = yield self._request(
destination,
"GET",
path,
query=args,
query_bytes=encode_query_args(args),
body_callback=body_callback,
retry_on_dns_fail=retry_on_dns_fail,
timeout=timeout,
ignore_backoff=ignore_backoff,
@@ -504,12 +491,9 @@ class MatrixFederationHttpClient(object):
check_content_type_is_json(response.headers)

with logcontext.PreserveLoggingContext():
body = yield self._timeout_deferred(
treq.json_content(response),
timeout,
)
body = yield readBody(response)

defer.returnValue(body)
defer.returnValue(json.loads(body))

@defer.inlineCallbacks
def delete_json(self, destination, path, long_retries=False,
@@ -539,11 +523,13 @@ class MatrixFederationHttpClient(object):
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
"""

response = yield self._request(
destination,
"DELETE",
path,
query=args,
query_bytes=encode_query_args(args),
headers_dict={"Content-Type": ["application/json"]},
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
@@ -554,12 +540,9 @@ class MatrixFederationHttpClient(object):
check_content_type_is_json(response.headers)

with logcontext.PreserveLoggingContext():
body = yield self._timeout_deferred(
treq.json_content(response),
timeout,
)
body = yield readBody(response)

defer.returnValue(body)
defer.returnValue(json.loads(body))

@defer.inlineCallbacks
def get_file(self, destination, path, output_stream, args={},
@@ -586,11 +569,26 @@ class MatrixFederationHttpClient(object):
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
"""

encoded_args = {}
for k, vs in args.items():
if isinstance(vs, string_types):
vs = [vs]
encoded_args[k] = [v.encode("UTF-8") for v in vs]

query_bytes = urllib.urlencode(encoded_args, True)
logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail)

def body_callback(method, url_bytes, headers_dict):
self.sign_request(destination, method, url_bytes, headers_dict)
return None

response = yield self._request(
destination,
"GET",
path,
query=args,
query_bytes=query_bytes,
body_callback=body_callback,
retry_on_dns_fail=retry_on_dns_fail,
ignore_backoff=ignore_backoff,
)
@@ -599,10 +597,8 @@ class MatrixFederationHttpClient(object):

try:
with logcontext.PreserveLoggingContext():
length = yield self._timeout_deferred(
_readBodyToFile(
response, output_stream, max_size
),
length = yield _readBodyToFile(
response, output_stream, max_size
)
except Exception:
logger.exception("Failed to download body")
@@ -610,27 +606,6 @@ class MatrixFederationHttpClient(object):

defer.returnValue((length, headers))

def _timeout_deferred(self, deferred, timeout_ms=None):
"""Times the deferred out after `timeout_ms` ms

Args:
deferred (Deferred)
timeout_ms (int|None): Timeout in milliseconds. If None defaults
to 60 seconds.

Returns:
Deferred
"""

add_timeout_to_deferred(
deferred,
timeout_ms / 1000. if timeout_ms else 60,
self.hs.get_reactor(),
cancelled_to_request_timed_out_error,
)

return deferred


class _ReadBodyToFileProtocol(protocol.Protocol):
def __init__(self, stream, deferred, max_size):
@@ -664,6 +639,30 @@ def _readBodyToFile(response, stream, max_size):
return d


class _JsonProducer(object):
""" Used by the twisted http client to create the HTTP body from json
"""
def __init__(self, jsn):
self.reset(jsn)

def reset(self, jsn):
self.body = encode_canonical_json(jsn)
self.length = len(self.body)

def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)

def pauseProducing(self):
pass

def stopProducing(self):
pass

def resumeProducing(self):
pass

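Editor's note: `_JsonProducer` implements twisted's IBodyProducer interface (startProducing plus the pause/stop/resume no-ops), so it can be handed straight to `Agent.request`. A hypothetical usage sketch:

    from twisted.internet import reactor
    from twisted.web.client import Agent
    from twisted.web.http_headers import Headers

    def put_json(url_bytes, jsn):
        agent = Agent(reactor)
        # body is canonical JSON; .length lets twisted set Content-Length
        return agent.request(
            b"PUT", url_bytes,
            Headers({b"Content-Type": [b"application/json"]}),
            _JsonProducer(jsn),
        )
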
def _flatten_response_never_received(e):
if hasattr(e, "reasons"):
reasons = ", ".join(
@@ -694,7 +693,7 @@ def check_content_type_is_json(headers):
"No Content-Type header"
)

c_type = c_type[0].decode('ascii')  # only the first header
c_type = c_type[0]  # only the first header
val, options = cgi.parse_header(c_type)
if val != "application/json":
raise RuntimeError(
@@ -712,6 +711,6 @@ def encode_query_args(args):
vs = [vs]
encoded_args[k] = [v.encode("UTF-8") for v in vs]

query_bytes = urllib.parse.urlencode(encoded_args, True)
query_bytes = urllib.urlencode(encoded_args, True)

return query_bytes.encode('utf8')
return query_bytes

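Editor's note: in both spellings the second positional argument to `urlencode` is `doseq`; when true, list values expand into repeated query keys. For example:

    try:
        from urllib.parse import urlencode  # py3
    except ImportError:
        from urllib import urlencode        # py2

    urlencode({"types": ["m.room.member", "m.room.name"]}, True)
    # -> 'types=m.room.member&types=m.room.name'
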
@@ -15,7 +15,6 @@
# limitations under the License.

import logging
import threading

from prometheus_client.core import Counter, Histogram

@@ -112,9 +111,6 @@ in_flight_requests_db_sched_duration = Counter(
# The set of all in flight requests, set[RequestMetrics]
_in_flight_requests = set()

# Protects the _in_flight_requests set from concurrent access
_in_flight_requests_lock = threading.Lock()


def _get_in_flight_counts():
"""Returns a count of all in flight requests by (method, server_name)
@@ -124,8 +120,7 @@ def _get_in_flight_counts():
"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _in_flight_requests_lock:
reqs = list(_in_flight_requests)
reqs = list(_in_flight_requests)

for rm in reqs:
rm.update_metrics()
@@ -159,12 +154,10 @@ class RequestMetrics(object):
# to the "in flight" metrics.
self._request_stats = self.start_context.get_resource_usage()

with _in_flight_requests_lock:
_in_flight_requests.add(self)
_in_flight_requests.add(self)

def stop(self, time_sec, request):
with _in_flight_requests_lock:
_in_flight_requests.discard(self)
_in_flight_requests.discard(self)

context = LoggingContext.current_context()


@@ -204,14 +204,14 @@ class SynapseRequest(Request):
self.start_time = time.time()
self.request_metrics = RequestMetrics()
self.request_metrics.start(
self.start_time, name=servlet_name, method=self.method.decode('ascii'),
self.start_time, name=servlet_name, method=self.method,
)

self.site.access_logger.info(
"%s - %s - Received request: %s %s",
self.getClientIP(),
self.site.site_tag,
self.method.decode('ascii'),
self.method,
self.get_redacted_uri()
)


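Editor's note: the pattern at stake in these hunks is snapshotting the shared set under a lock before iterating, since the Prometheus collector thread runs concurrently with request threads. Distilled:

    import threading

    _in_flight = set()
    _lock = threading.Lock()

    def snapshot():
        # copy under the lock so the collector iterates a stable list even
        # if another thread adds or discards entries mid-collection
        with _lock:
            return list(_in_flight)
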
@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import threading

import six

from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily
@@ -80,9 +78,6 @@ _background_process_counts = dict()  # type: dict[str, int]
# of process descriptions that no longer have any active processes.
_background_processes = dict()  # type: dict[str, set[_BackgroundProcess]]

# A lock that covers the above dicts
_bg_metrics_lock = threading.Lock()


class _Collector(object):
"""A custom metrics collector for the background process metrics.
@@ -97,11 +92,7 @@ class _Collector(object):
labels=["name"],
)

# We copy the dict so that it doesn't change from underneath us
with _bg_metrics_lock:
_background_processes_copy = dict(_background_processes)

for desc, processes in six.iteritems(_background_processes_copy):
for desc, processes in six.iteritems(_background_processes):
background_process_in_flight_count.add_metric(
(desc,), len(processes),
)
@@ -176,26 +167,19 @@ def run_as_background_process(desc, func, *args, **kwargs):
"""
@defer.inlineCallbacks
def run():
with _bg_metrics_lock:
count = _background_process_counts.get(desc, 0)
_background_process_counts[desc] = count + 1

count = _background_process_counts.get(desc, 0)
_background_process_counts[desc] = count + 1
_background_process_start_count.labels(desc).inc()

with LoggingContext(desc) as context:
context.request = "%s-%i" % (desc, count)
proc = _BackgroundProcess(desc, context)

with _bg_metrics_lock:
_background_processes.setdefault(desc, set()).add(proc)

_background_processes.setdefault(desc, set()).add(proc)
try:
yield func(*args, **kwargs)
finally:
proc.update_metrics()

with _bg_metrics_lock:
_background_processes[desc].remove(proc)
_background_processes[desc].remove(proc)

with PreserveLoggingContext():
return run()

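Editor's note: a hedged sketch of calling `run_as_background_process` as defined above; the job body is hypothetical:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def _prune_old_entries():
        yield defer.succeed(None)  # stand-in for the real periodic work

    # runs under its own LoggingContext ("prune_old_entries-N") and is
    # tracked by the background-process metrics shown above
    run_as_background_process("prune_old_entries", _prune_old_entries)
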
@@ -39,11 +39,10 @@ REQUIREMENTS = {
"signedjson>=1.0.0": ["signedjson>=1.0.0"],
"pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
"service_identity>=1.0.0": ["service_identity>=1.0.0"],
"Twisted>=17.1.0": ["twisted>=17.1.0"],
"treq>=15.1": ["treq>=15.1"],
"Twisted>=16.0.0": ["twisted>=16.0.0"],

# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"],
# We use crypto.get_elliptic_curve which is only supported in >=0.15
"pyopenssl>=0.15": ["OpenSSL>=0.15"],

"pyyaml": ["yaml"],
"pyasn1": ["pyasn1"],
@@ -79,9 +78,6 @@ CONDITIONAL_REQUIREMENTS = {
"affinity": {
"affinity": ["affinity"],
},
"postgres": {
"psycopg2>=2.6": ["psycopg2"]
}
}



@@ -590,9 +590,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
pending_commands = LaterGauge(
"synapse_replication_tcp_protocol_pending_commands",
"",
["name"],
["name", "conn_id"],
lambda: {
(p.name,): len(p.pending_commands) for p in connected_connections
(p.name, p.conn_id): len(p.pending_commands) for p in connected_connections
},
)

@@ -607,9 +607,9 @@ def transport_buffer_size(protocol):
transport_send_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_send_buffer",
"",
["name"],
["name", "conn_id"],
lambda: {
(p.name,): transport_buffer_size(p) for p in connected_connections
(p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections
},
)

@@ -632,9 +632,9 @@ def transport_kernel_read_buffer_size(protocol, read=True):
tcp_transport_kernel_send_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_kernel_send_buffer",
"",
["name"],
["name", "conn_id"],
lambda: {
(p.name,): transport_kernel_read_buffer_size(p, False)
(p.name, p.conn_id): transport_kernel_read_buffer_size(p, False)
for p in connected_connections
},
)
@@ -643,9 +643,9 @@ tcp_transport_kernel_send_buffer = LaterGauge(
tcp_transport_kernel_read_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_kernel_read_buffer",
"",
["name"],
["name", "conn_id"],
lambda: {
(p.name,): transport_kernel_read_buffer_size(p, True)
(p.name, p.conn_id): transport_kernel_read_buffer_size(p, True)
for p in connected_connections
},
)
@@ -654,9 +654,9 @@ tcp_transport_kernel_read_buffer = LaterGauge(
tcp_inbound_commands = LaterGauge(
"synapse_replication_tcp_protocol_inbound_commands",
"",
["command", "name"],
["command", "name", "conn_id"],
lambda: {
(k[0], p.name,): count
(k[0], p.name, p.conn_id): count
for p in connected_connections
for k, count in iteritems(p.inbound_commands_counter)
},
@@ -665,9 +665,9 @@ tcp_inbound_commands = LaterGauge(
tcp_outbound_commands = LaterGauge(
"synapse_replication_tcp_protocol_outbound_commands",
"",
["command", "name"],
["command", "name", "conn_id"],
lambda: {
(k[0], p.name,): count
(k[0], p.name, p.conn_id): count
for p in connected_connections
for k, count in iteritems(p.outbound_commands_counter)
},

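Editor's note: judging from the calls above, Synapse's `LaterGauge` takes a name, a help string, a label list, and a callable evaluated at scrape time that returns a dict keyed by label tuples; adding `conn_id` just widens those tuples. A sketch following that pattern (`connections` is a stand-in for `connected_connections`):

    connections = []  # stand-in; elements expose .name, .conn_id, .pending_commands

    example_gauge = LaterGauge(
        "example_pending_commands", "",
        ["name", "conn_id"],
        lambda: {
            (c.name, c.conn_id): len(c.pending_commands) for c in connections
        },
    )
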
@@ -101,7 +101,7 @@ class UserRegisterServlet(ClientV1RestServlet):

nonce = self.hs.get_secrets().token_hex(64)
self.nonces[nonce] = int(self.reactor.seconds())
return (200, {"nonce": nonce})
return (200, {"nonce": nonce.encode('ascii')})

@defer.inlineCallbacks
def on_POST(self, request):
@@ -164,7 +164,7 @@ class UserRegisterServlet(ClientV1RestServlet):
key=self.hs.config.registration_shared_secret.encode(),
digestmod=hashlib.sha1,
)
want_mac.update(nonce.encode('utf8'))
want_mac.update(nonce)
want_mac.update(b"\x00")
want_mac.update(username)
want_mac.update(b"\x00")
@@ -173,10 +173,7 @@ class UserRegisterServlet(ClientV1RestServlet):
want_mac.update(b"admin" if admin else b"notadmin")
want_mac = want_mac.hexdigest()

if not hmac.compare_digest(
want_mac.encode('ascii'),
got_mac.encode('ascii')
):
if not hmac.compare_digest(want_mac, got_mac.encode('ascii')):
raise SynapseError(403, "HMAC incorrect")

# Reuse the parts of RegisterRestServlet to reduce code duplication

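Editor's note: the MAC being compared above is a SHA-1 HMAC, keyed on the registration shared secret, over the NUL-separated nonce, username, password, and admin flag. A client-side sketch mirroring that computation (argument types assumed to be text):

    import hashlib
    import hmac

    def register_mac(shared_secret, nonce, username, password, admin=False):
        mac = hmac.new(shared_secret.encode(), digestmod=hashlib.sha1)
        for field in (nonce, username, password):
            mac.update(field.encode("utf8"))
            mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()
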
Some files were not shown because too many files have changed in this diff