Compare commits
180 Commits
| Author | SHA1 | Date |
|---|---|---|
| | ac6a0d72b2 | |
| | 9ac72d9543 | |
| | 44a4d65586 | |
| | 6bb1c028f1 | |
| | f191be822b | |
| | 57426ec6a3 | |
| | 4bc7483518 | |
| | 4dc945ba30 | |
| | 802884d4ee | |
| | b8e6ed36c1 | |
| | 6fcb25202f | |
| | 7a4632af9c | |
| | c0b6955e3b | |
| | c74624a633 | |
| | a1a6473293 | |
| | c4414768af | |
| | a712aa3a9c | |
| | 16565e67db | |
| | 40c2271680 | |
| | 6728bf3940 | |
| | 6946c20111 | |
| | 71669a0fba | |
| | 899a119c2b | |
| | 641c409e4e | |
| | 70ea2f4e1d | |
| | 96c408273e | |
| | 1330aa4a8f | |
| | 65f3fbfbf7 | |
| | 1d0f2ec812 | |
| | c7b333c545 | |
| | 69efe6fb16 | |
| | 108d5fb20d | |
| | 9c598dddcb | |
| | b1a90da82e | |
| | 16c7afa94c | |
| | 8aaf7ffc44 | |
| | 84c0a20dfe | |
| | 4b9e5076c4 | |
| | 07493607a8 | |
| | bd398b874e | |
| | e4b078a600 | |
| | d730c2c22b | |
| | 890cb048fd | |
| | 5b9786ee00 | |
| | 65d1003d01 | |
| | f5050e148c | |
| | 9342cc6ab1 | |
| | 21d3f82344 | |
| | 47a7e3928d | |
| | 65bf9f1119 | |
| | 41285ffe5b | |
| | 71304bfc8d | |
| | 59e0112209 | |
| | d14e94bae4 | |
| | b82c9cf462 | |
| | f2891d2487 | |
| | 9982c71515 | |
| | 0969d688e3 | |
| | 5d3e3c051d | |
| | a164134a53 | |
| | 1d9df51ff1 | |
| | e28ef831e6 | |
| | 80467bbac3 | |
| | 7b288826b7 | |
| | e07384c4e1 | |
| | e1666af9be | |
| | fcd6f01dc7 | |
| | 0abb094f1a | |
| | 6d65659b62 | |
| | 16e0680498 | |
| | b9d6756b14 | |
| | 9bccd5e472 | |
| | 8184ae8a09 | |
| | 82fca11fc1 | |
| | 82ca6d1f9f | |
| | 633e5c933b | |
| | 3d672fec51 | |
| | a06614bd2a | |
| | b2200a8690 | |
| | c88bc53903 | |
| | 8d98dc8ffe | |
| | 86920ac266 | |
| | dbdc565dfd | |
| | c594cc8076 | |
| | ae753fed8c | |
| | 5f9bdf90fe | |
| | c003450057 | |
| | 49b58f0a16 | |
| | 62175a20e5 | |
| | 1bb35e3a83 | |
| | bc8fa1509d | |
| | 1c0eb8bbb2 | |
| | a288bdf0b1 | |
| | 5a707a2f9a | |
| | a8626901cd | |
| | 32590b7139 | |
| | 7c70b8f8a6 | |
| | 107aeb6915 | |
| | 968a30a75c | |
| | 0869f01e74 | |
| | 2b2466f78b | |
| | 561eebe170 | |
| | 34ac75ce2c | |
| | 92e6fb5c89 | |
| | a9b5ea6fc1 | |
| | f8b9ca53ce | |
| | d154f5a055 | |
| | f3ab0b2390 | |
| | 128902d60a | |
| | 4cc4400b4d | |
| | fc2c245a1f | |
| | 459d3d5046 | |
| | d328a93b51 | |
| | af691e415c | |
| | 028267acd2 | |
| | d08bac4136 | |
| | c30f73c86a | |
| | 092b541401 | |
| | 45bb55c6de | |
| | 8b9ae6d3a6 | |
| | 94960cef03 | |
| | 12ae64ce0d | |
| | fe725f7e45 | |
| | e85aabb030 | |
| | d9713e916e | |
| | 04dad5ac16 | |
| | 2f16857ca9 | |
| | e07cc31cb8 | |
| | 68a53f825f | |
| | 32e54b472a | |
| | 915421065b | |
| | d1b060b492 | |
| | 7033b05cad | |
| | 9caab0c364 | |
| | dc5efc92a8 | |
| | e83a190643 | |
| | 41c3f21c3b | |
| | 91c8a7f9f4 | |
| | eb2b8523ae | |
| | 5b68e12fd8 | |
| | 6d02a13d81 | |
| | 4151111d95 | |
| | 6575df647d | |
| | 68d2869c8d | |
| | da95867d30 | |
| | bd4505f765 | |
| | f86b695cbd | |
| | af8a2f679b | |
| | 1895d14e12 | |
| | b99c532c1c | |
| | 02c729d6b0 | |
| | 02c46acc6a | |
| | bfcefbb230 | |
| | 6f47bc3fb2 | |
| | 8e32f26cb8 | |
| | cb12a37708 | |
| | f61b2068e6 | |
| | f666fe36d7 | |
| | bf4fd14806 | |
| | f830a3be2a | |
| | 649fe1c2be | |
| | f595d6ac57 | |
| | f311018823 | |
| | 4074c8b968 | |
| | eaf4d11af9 | |
| | b02465b9db | |
| | 00cf679bf2 | |
| | 0927adb012 | |
| | 7fc1196a36 | |
| | 6cb415b63f | |
| | c6e75c9f2d | |
| | c1dfd6a18a | |
| | bb4fd8f927 | |
| | dc70789056 | |
| | 93f7d2df3e | |
| | 6a8f902edb | |
| | ef2228c890 | |
| | 4588b0d64a | |
| | d528406cb8 | |
| | ae19a7db8c | |

.gitignore (vendored, 84 changes)

@@ -1,64 +1,36 @@
*.pyc
.*.swp
# filename patterns
*~
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock

.DS_Store
*.pyc
*.tac
_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
pip-wheel-metadata/

cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
/homeserver*.yaml
# stuff that is likely to exist when you run a server locally
/*.signing.key
/*.tls.crt
/*.tls.key
/uploads
/media_store/

*.signing.key
*.tls.crt
*.tls.dh
*.tls.key
# IDEs
/.idea/
/.ropeproject/
/.vscode/

.coverage*
coverage.*
!.coveragerc
htmlcov
# build products
/.coverage*
!/.coveragerc
/.tox
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/

demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc

uploads
cache

.idea/
media_store/

*.tac

build/
venv/
venv*/
*venv/

localhost-800*/
static/client/register/register_config.js
.tox

env/
*.config

.vscode/
.ropeproject/
*.deb
/debs

.travis.yml (60 changes)

@@ -1,4 +1,4 @@
sudo: false
dist: xenial
language: python

cache:

@@ -12,9 +12,6 @@ cache:
    #
    - $HOME/.cache/pip/wheels

addons:
  postgresql: "9.4"

# don't clone the whole repo history, one commit will do
git:
  depth: 1

@@ -25,6 +22,7 @@ branches:
    - master
    - develop
    - /^release-v/
    - rav/pg95

# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests

@@ -32,46 +30,62 @@ branches:
matrix:
  fast_finish: true
  include:
    - python: 2.7
      env: TOX_ENV=packaging
    - name: "pep8"
      python: 3.6
      env: TOX_ENV="pep8,check_isort,packaging"

    - python: 3.6
      env: TOX_ENV="pep8,check_isort"

    - python: 2.7
    - name: "py2.7 / sqlite"
      python: 2.7
      env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"

    - python: 2.7
    - name: "py2.7 / sqlite / olddeps"
      python: 2.7
      env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"

    - python: 2.7
    - name: "py2.7 / postgres9.5"
      python: 2.7
      addons:
        postgresql: "9.5"
      env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - python: 3.5
    - name: "py3.5 / sqlite"
      python: 3.5
      env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"

    - python: 3.6
      env: TOX_ENV=py36,codecov TRIAL_FLAGS="-j 2"
    - name: "py3.7 / sqlite"
      python: 3.7
      env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"

    - python: 3.6
      env: TOX_ENV=py36-postgres,codecov TRIAL_FLAGS="-j 4"
    - name: "py3.7 / postgres9.4"
      python: 3.7
      addons:
        postgresql: "9.4"
      env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - name: "py3.7 / postgres9.5"
      python: 3.7
      addons:
        postgresql: "9.5"
      env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - # we only need to check for the newsfragment if it's a PR build
      if: type = pull_request
      name: "check-newsfragment"
      python: 3.6
      env: TOX_ENV=check-newsfragment
      script:
        - git remote set-branches --add origin develop
        - git fetch origin develop
        - tox -e $TOX_ENV
      script: scripts-dev/check-newsfragment

install:
  # this just logs the postgres version we will be testing against (if any)
  - psql -At -U postgres -c 'select version();' || true

  - pip install tox


# if we don't have python3.6 in this environment, travis unhelpfully gives us
# a `python3.6` on our path which does nothing but spit out a warning. Tox
# tries to run it (even if we're not running a py36 env), so the build logs

CHANGES.md (69 changes)

@@ -1,3 +1,72 @@
Synapse 0.99.2 (2019-03-01)
===========================

Features
--------

- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))


Bugfixes
--------

- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
- The dependency checker now correctly reports a version mismatch for optional
  dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
- Fix an issue in the database migration script where the
  `e2e_room_keys.is_verified` column wasn't considered as
  a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))


Internal Changes
----------------

- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))


Synapse 0.99.1.1 (2019-02-14)
=============================

Bugfixes
--------

- Fix "TypeError: '>' not supported" when starting without an existing certificate.
  Fix a bug where an existing certificate would be reprovisoned every day. ([\#4648](https://github.com/matrix-org/synapse/issues/4648))


Synapse 0.99.1 (2019-02-14)
===========================

CONTRIBUTING.rst

@@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please

@@ -74,16 +74,39 @@ entry. These are managed by Towncrier
To create a changelog entry, make a new file in the ``changelog.d``
file named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
deprecations), or ``misc`` (for internal-only changes).

The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.

Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when recieved over federation. Contributed by Jane
Matrix".
Matrix.".

Debian changelog
----------------

Changes which affect the debian packaging files (in ``debian``) are an
exception.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::

    dch

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)

If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

Attribution
~~~~~~~~~~~

INSTALL.md (35 changes)

@@ -358,26 +358,25 @@ For information on using a reverse proxy, see
[docs/reverse_proxy.rst](docs/reverse_proxy.rst).

To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`.
`homeserver.yaml`, as follows:

First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) and space at the start of
each line). The relevant lines are like this:
* First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

```
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
```

You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You can either point
these settings at an existing certificate and key, or you can enable Synapse's
built-in ACME (Let's Encrypt) support. Instructions for having Synapse
automatically provision and renew federation certificates through ACME can be
found at [ACME.md](docs/ACME.md).

```
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
```

* You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You can either
  point these settings at an existing certificate and key, or you can
  enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
  for having Synapse automatically provision and renew federation
  certificates through ACME can be found at [ACME.md](docs/ACME.md).
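
  For reference, once uncommented those two settings look something like the
  following sketch (the paths are placeholders, not values from this diff;
  point them at your own files or let the built-in ACME support manage them):

  ```
  tls_certificate_path: "/path/to/fullchain.pem"
  tls_private_key_path: "/path/to/privkey.pem"
  ```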

## Registering a user

README.rst

@@ -199,6 +199,8 @@ by installing the ``libjemalloc1`` package and adding this line to

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

Upgrading an existing Synapse
=============================

UPGRADE.rst

@@ -39,7 +39,7 @@ instructions that may be required are listed later in this document.

    ./synctl restart


To check whether your update was sucessful, you can check the Server header
To check whether your update was successful, you can check the Server header
returned by the Client-Server API:

.. code:: bash
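
A plausible form of that check (the hostname and port are placeholders for
your own deployment, so adjust them as needed)::

    curl -kv https://localhost:8448/_matrix/client/versions 2>&1 | grep "Server:"
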
debian/changelog (vendored, 13 changes)

@@ -1,3 +1,16 @@
matrix-synapse-py3 (0.99.2) stable; urgency=medium

  * Fix overwriting of config settings on upgrade.
  * New synapse release 0.99.2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 01 Mar 2019 10:55:08 +0000

matrix-synapse-py3 (0.99.1.1) stable; urgency=medium

  * New synapse release 0.99.1.1

 -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 17:19:44 +0000

matrix-synapse-py3 (0.99.1) stable; urgency=medium

  [ Damjan Georgievski ]

debian/install (vendored, 1 change)

@@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/

debian/manage_debconf.pl (vendored, new executable file, 130 lines)

@@ -0,0 +1,130 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
#   manage_debconf.pl <action>
#
# where <action> can be:
#
#   read: read the configuration from the yaml into debconf
#   update: update the yaml config according to the debconf database
use strict;
use warnings;

use Debconf::Client::ConfModule (qw/get set/);

# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
    server_name => 'matrix-synapse/server-name',
    report_stats => 'matrix-synapse/report-stats',
);

# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};

sub read_config {
    my @files = @_;

    foreach my $file (@files) {
        print STDERR "reading $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        # rudimentary parsing which (a) avoids having to depend on a yaml library,
        # and (b) is tolerant of yaml errors
        while($_ = <$FH>) {
            while (my ($setting, $debconf) = each %MAPPINGS) {
                $setting = quotemeta $setting;
                if(/^${setting}\s*:(.*)$/) {
                    my $val = $1;

                    # remove leading/trailing whitespace
                    $val =~ s/^\s*//;
                    $val =~ s/\s*$//;

                    # remove surrounding quotes
                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
                        $val = $1;
                    }

                    print STDERR ">> $debconf = $val\n" if $DEBUG;
                    set($debconf, $val);
                }
            }
        }
        close $FH;
    }
}

sub update_config {
    my @files = @_;

    my %substs = ();
    while (my ($setting, $debconf) = each %MAPPINGS) {
        my @res = get($debconf);
        $substs{$setting} = $res[1] if $res[0] == 0;
    }

    foreach my $file (@files) {
        print STDERR "checking $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        my $updated = 0;

        # read the whole file into memory
        my @lines = <$FH>;

        while (my ($setting, $val) = each %substs) {
            $setting = quotemeta $setting;

            map {
                if (/^${setting}\s*:\s*(.*)\s*$/) {
                    my $current = $1;
                    if ($val ne $current) {
                        $_ = "${setting}: $val\n";
                        $updated = 1;
                    }
                }
            } @lines;
        }
        close $FH;

        next unless $updated;

        print STDERR "updating $file\n" if $DEBUG;
        open $FH, ">", $file or die "unable to update $file";
        print $FH @lines;
        close $FH;
    }
}


my $cmd = $ARGV[0];

my $read = 0;
my $update = 0;

if (not $cmd) {
    die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
    $read = 1;
} elsif ($cmd eq 'update') {
    $update = 1;
} else {
    die "unknown command '$cmd'\n";
}

my @files = (
    "/etc/matrix-synapse/homeserver.yaml",
    glob("/etc/matrix-synapse/conf.d/*.yaml"),
);

if ($read) {
    read_config(@files);
} elsif ($update) {
    update_config(@files);
}

debian/matrix-synapse-py3.config (vendored)

@@ -4,6 +4,9 @@ set -e

. /usr/share/debconf/confmodule

# try to update the debconf db according to whatever is in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true

db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go

debian/matrix-synapse-py3.postinst (vendored, 31 changes)

@@ -8,19 +8,36 @@ USER="matrix-synapse"

case "$1" in
  configure|reconfigure)
    # Set server name in config file

    # generate template config files if they don't exist
    mkdir -p "/etc/matrix-synapse/conf.d/"
    db_get matrix-synapse/server-name
    if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
        cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

    if [ "$RET" ]; then
        echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
    fi

    db_get matrix-synapse/report-stats
    if [ "$RET" ]; then
        echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
    if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
        cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
    fi

    # update the config files according to whatever is in the debconf database
    /opt/venvs/matrix-synapse/lib/manage_debconf.pl update

    if ! getent passwd $USER >/dev/null; then
      adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
    fi

demo/.gitignore (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
*.db
*.log
*.log.*
*.pid

/media_store.*
/etc

docker/Dockerfile-dhvirtualenv

@@ -58,7 +58,11 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    sqlite3

COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
RUN apt-get install -yq /dh-virtualenv_1.1-1_all.deb

# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
    && apt-get install -yq /dh-virtualenv_1.1-1_all.deb

WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

docs/ACME.md (19 changes)

@@ -10,13 +10,14 @@ through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.

In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
used between client and federation ports without issue.

For a sample configuration, please inspect the new ACME section in the example
generated config by running the `generate-config` executable. For example:
If your configuration file does not already have an `acme` section, you can
generate an example config by running the `generate_config` executable. For
example:

```
~/synapse/env3/bin/generate-config
~/synapse/env3/bin/generate_config
```

You will need to provide Let's Encrypt (or another ACME provider) access to

@@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
(Do not run Synapse with root permissions!) Detailed instructions are
available under "ACME setup" below.

If you are already using self-signed certificates, you will need to back up
or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
Synapse's root directory), Synapse's ACME implementation will not overwrite
them.
If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.

You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you

@@ -87,7 +87,6 @@ acme:
    port: 8009
```


#### Authbind

`authbind` allows a program which does not run as root to bind to

@@ -127,4 +126,4 @@ acme:

Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.

Finally, start/restart Synapse.
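
As a rough sketch of the authbind route mentioned in the hunks above (these
exact commands are an assumption about a typical setup rather than the
documented procedure; see the full "ACME setup" section of the file):

```
# grant non-root processes permission to bind port 80 via authbind
sudo touch /etc/authbind/byport/80
sudo chmod 777 /etc/authbind/byport/80

# then start Synapse wrapped in authbind
authbind --deep python -m synapse.app.homeserver -c homeserver.yaml
```
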
docs/MSC1711_certificates_FAQ.md

@@ -125,7 +125,7 @@ doing one of the following:
* Use Synapse's [ACME support](./ACME.md), and forward port 80 on the
  `server_name` domain to your Synapse instance.

### Option 2: run Synapse behind a reverse proxy
#### Option 2: run Synapse behind a reverse proxy

If you have an existing reverse proxy set up with correct TLS certificates for
your domain, you can simply route all traffic through the reverse proxy by

docs/reverse_proxy.rst

@@ -79,12 +79,30 @@ Let's assume that we expect clients to connect to our server at
          SSLEngine on
          ServerName example.com;

          <Location />
          <Location /_matrix>
              ProxyPass http://127.0.0.1:8008/_matrix nocanon
              ProxyPassReverse http://127.0.0.1:8008/_matrix
          </Location>
      </VirtualHost>

* HAProxy::

      frontend https
        bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
        bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1

        # Matrix client traffic
        acl matrix hdr(host) -i matrix.example.com
        use_backend matrix if matrix

      frontend matrix-federation
        bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
        bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
        default_backend matrix

      backend matrix
        server matrix 127.0.0.1:8008

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
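
As a sketch, the matching ``listeners`` entry in ``homeserver.yaml`` would
look something like this (assuming the port 8008 backend used above; treat it
as illustrative rather than part of this diff)::

    listeners:
      - port: 8008
        tls: false
        type: http
        x_forwarded: true
        bind_addresses: ['127.0.0.1']
        resources:
          - names: [client, federation]
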
docs/tcp_replication.rst

@@ -137,7 +137,6 @@ for each stream so that on reconneciton it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

@@ -221,3 +220,28 @@ SYNC (S, C)

See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.


Cache Invalidation Stream
~~~~~~~~~~~~~~~~~~~~~~~~~

The cache invalidation stream is used to inform workers when they need to
invalidate any of their caches in the data store. This is done by streaming all
cache invalidations done on master down to the workers, assuming that any caches
on the workers also exist on the master.

Each individual cache invalidation results in a row being sent down replication,
which includes the cache name (the name of the function) and they key to
invalidate. For example::

    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

However, there are times when a number of caches need to be invalidated at the
same time with the same key. To reduce traffic we batch those invalidations into
a single poke by defining a special cache name that workers understand to mean
to expand to invalidate the correct caches.

Currently the special cache names are declared in ``synapse/storage/_base.py``
and are:

1. ``cs_cache_fake`` ─ invalidates caches that depend on the current state
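
For illustration, a batched invalidation using that special cache name travels
down the same stream in the same row format; the token, key and timestamp below
are made-up values, not taken from a real server::

    > RDATA caches 550953772 ["cs_cache_fake", ["!room:example.com"], 1550574873252]
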
docs/workers.rst

@@ -222,6 +222,13 @@ following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
    ^/_matrix/client/(api/v1|r0|unstable)/login$

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::

    ^/_matrix/client/(r0|unstable)/register$


``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~

scripts-dev/check-newsfragment (new executable file, 41 lines)

@@ -0,0 +1,41 @@
#!/bin/bash
#
# A script which checks that an appropriate news file has been added on this
# branch.

set -e

# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch --depth=1 origin develop

UPSTREAM=origin/develop

# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet $UPSTREAM... -- debian; then
    if git diff --quiet $UPSTREAM... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        exit 1
    fi
fi

# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
    tox -e check-newsfragment
fi

echo
echo "--------------------------"
echo

# check that any new newsfiles on this branch end with a full stop.
for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
    lastchar=`tr -d '\n' < $f | tail -c 1`
    if [ $lastchar != '.' ]; then
        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
        exit 1
    fi
done
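
As the `.travis.yml` change above shows, CI runs this via
`script: scripts-dev/check-newsfragment`. Running it locally from a PR branch
is simply the following (it needs network access to fetch `origin/develop`,
and `tox` on the path):

```
./scripts-dev/check-newsfragment
```
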
scripts/synapse_port_db

@@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
    "group_summary_users": ["is_public"],
    "group_roles": ["is_public"],
    "local_group_membership": ["is_publicised", "is_admin"],
    "e2e_room_keys": ["is_verified"],
}


synapse/__init__.py

@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.1"
__version__ = "0.99.2"

synapse/app/_base.py

@@ -25,10 +25,12 @@ from daemonize import Daemonize
from twisted.internet import error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)

@@ -270,9 +272,37 @@ def start(hs, listeners=None):
        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().start_profiling()

        setup_sentry(hs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
        if reactor.running:
            reactor.stop()
        sys.exit(1)


def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """

    if not hs.config.sentry_enabled:
        return

    import sentry_sdk
    sentry_sdk.init(
        dsn=hs.config.sentry_dsn,
        release=get_version_string(synapse),
    )

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.config.worker_name if hs.config.worker_name else "master"
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)

synapse/app/client_reader.py

@@ -40,6 +40,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,

@@ -47,6 +48,7 @@ from synapse.rest.client.v1.room import (
    RoomMemberListRestServlet,
    RoomStateRestServlet,
)
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree

@@ -92,6 +94,8 @@ class ClientReaderServer(HomeServer):
            JoinedRoomMemberListRestServlet(self).register(resource)
            RoomStateRestServlet(self).register(resource)
            RoomEventContextServlet(self).register(resource)
            RegisterRestServlet(self).register(resource)
            LoginRestServlet(self).register(resource)

            resources.update({
                "/_matrix/client/r0": resource,

synapse/app/federation_reader.py

@@ -40,6 +40,7 @@ from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler

@@ -62,6 +63,7 @@ class FederationReaderSlavedStore(
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    SlavedRegistrationStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,

synapse/app/frontend_proxy.py

@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig

@@ -66,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
        headers = {
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
            self.main_uri + request.uri.decode('ascii'),
            headers=headers,
        )

        try:
            result = yield self.http_client.get_json(
                self.main_uri + request.uri.decode('ascii'),
                headers=headers,
            )
        except HttpResponseException as e:
            raise e.to_synapse_error()

        defer.returnValue((200, result))

    @defer.inlineCallbacks

synapse/app/homeserver.py

@@ -1,6 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -14,11 +15,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import gc
import logging
import os
import sys
import traceback

from six import iteritems

@@ -27,6 +29,7 @@ from prometheus_client import Gauge

from twisted.application import service
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File

@@ -394,10 +397,10 @@ def setup(config_options):
        # is less than our re-registration threshold.
        provision = False

        if (cert_days_remaining is None):
            provision = True

        if cert_days_remaining > hs.config.acme_reprovision_threshold:
        if (
            cert_days_remaining is None or
            cert_days_remaining < hs.config.acme_reprovision_threshold
        ):
            provision = True

        if provision:

@@ -438,7 +441,11 @@ def setup(config_options):
        hs.get_datastore().start_doing_background_updates()
    except Exception:
        # Print the exception and bail out.
        traceback.print_exc(file=sys.stderr)
        print("Error during startup:", file=sys.stderr)

        # this gives better tracebacks than traceback.print_exc()
        Failure().printTraceback(file=sys.stderr)

        if reactor.running:
            reactor.stop()
        sys.exit(1)

@@ -548,6 +555,9 @@ def run(hs):
            stats["memory_rss"] += process.memory_info().rss
            stats["cpu_average"] += int(process.cpu_percent(interval=None))

        stats["database_engine"] = hs.get_datastore().database_engine_name
        stats["database_server_version"] = hs.get_datastore().get_server_version()

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(

synapse/config/api.py

@@ -33,6 +33,7 @@ class ApiConfig(Config):
        ## API Configuration ##

        # A list of event types that will be included in the room_invite_state
        #
        room_invite_state_types:
            - "{JoinRules}"
            - "{CanonicalAlias}"

synapse/config/appservice.py

@@ -38,10 +38,12 @@ class AppServiceConfig(Config):
    def default_config(cls, **kwargs):
        return """\
        # A list of application service config file to use
        #
        app_service_config_files: []

        # Whether or not to track application service IP addresses. Implicitly
        # enables MAU tracking for application service users.
        #
        track_appservice_user_ips: False
        """

synapse/config/captcha.py

@@ -30,19 +30,22 @@ class CaptchaConfig(Config):
        # See docs/CAPTCHA_SETUP for full details of configuring this.

        # This Home Server's ReCAPTCHA public key.
        #
        recaptcha_public_key: "YOUR_PUBLIC_KEY"

        # This Home Server's ReCAPTCHA private key.
        #
        recaptcha_private_key: "YOUR_PRIVATE_KEY"

        # Enables ReCaptcha checks when registering, preventing signup
        # unless a captcha is answered. Requires a valid ReCaptcha
        # public/private key.
        #
        enable_registration_captcha: False

        # A secret key used to bypass the captcha test entirely.
        #captcha_bypass_secret: "YOUR_SECRET_HERE"

        # The API endpoint to use for verifying m.login.recaptcha responses.
        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
        """

synapse/config/cas.py

@@ -38,6 +38,7 @@ class CasConfig(Config):
    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable CAS for registration and login.
        #
        #cas_config:
        #   enabled: true
        #   server_url: "https://cas-server.com"

synapse/config/consent_config.py

@@ -54,20 +54,20 @@ DEFAULT_CONFIG = """\
# for an account. Has no effect unless `require_at_registration` is enabled.
# Defaults to "Privacy Policy".
#
# user_consent:
#   template_dir: res/templates/privacy
#   version: 1.0
#   server_notice_content:
#     msgtype: m.text
#     body: >-
#       To continue using this homeserver you must review and agree to the
#       terms and conditions at %(consent_uri)s
#   send_server_notice_to_guests: True
#   block_events_error: >-
#     To continue using this homeserver you must review and agree to the
#     terms and conditions at %(consent_uri)s
#   require_at_registration: False
#   policy_name: Privacy Policy
#user_consent:
#  template_dir: res/templates/privacy
#  version: 1.0
#  server_notice_content:
#    msgtype: m.text
#    body: >-
#      To continue using this homeserver you must review and agree to the
#      terms and conditions at %(consent_uri)s
#  send_server_notice_to_guests: True
#  block_events_error: >-
#    To continue using this homeserver you must review and agree to the
#    terms and conditions at %(consent_uri)s
#  require_at_registration: False
#  policy_name: Privacy Policy
#
"""

synapse/config/groups.py

@@ -24,9 +24,11 @@ class GroupsConfig(Config):
    def default_config(self, **kwargs):
        return """\
        # Whether to allow non server admins to create groups on this server
        #
        enable_group_creation: false

        # If enabled, non server admins can only create groups with local parts
        # starting with this prefix
        # group_creation_prefix: "unofficial/"
        #
        #group_creation_prefix: "unofficial/"
        """

synapse/config/jwt_config.py

@@ -46,8 +46,8 @@ class JWTConfig(Config):
        return """\
        # The JWT needs to contain a globally unique "sub" (subject) claim.
        #
        # jwt_config:
        #    enabled: true
        #    secret: "a secret"
        #    algorithm: "HS256"
        #jwt_config:
        #   enabled: true
        #   secret: "a secret"
        #   algorithm: "HS256"
        """

synapse/config/key.py

@@ -40,7 +40,7 @@ class KeyConfig(Config):
    def read_config(self, config):
        self.signing_key = self.read_signing_key(config["signing_key_path"])
        self.old_signing_keys = self.read_old_signing_keys(
            config["old_signing_keys"]
            config.get("old_signing_keys", {})
        )
        self.key_refresh_interval = self.parse_duration(
            config["key_refresh_interval"]

@@ -56,7 +56,7 @@ class KeyConfig(Config):
        if not self.macaroon_secret_key:
            # Unfortunately, there are people out there that don't have this
            # set. Lets just be "nice" and derive one from their secret key.
            logger.warn("Config is missing missing macaroon_secret_key")
            logger.warn("Config is missing macaroon_secret_key")
            seed = bytes(self.signing_key[0])
            self.macaroon_secret_key = hashlib.sha256(seed).digest()

@@ -83,24 +83,29 @@ class KeyConfig(Config):
        # a secret which is used to sign access tokens. If none is specified,
        # the registration_shared_secret is used, if one is given; otherwise,
        # a secret key is derived from the signing key.
        #
        %(macaroon_secret_key)s

        # Used to enable access token expiration.
        #
        expire_access_token: False

        # a secret which is used to calculate HMACs for form values, to stop
        # falsification of values. Must be specified for the User Consent
        # forms to work.
        #
        %(form_secret)s

        ## Signing Keys ##

        # Path to the signing key to sign messages with
        #
        signing_key_path: "%(base_key_name)s.signing.key"

        # The keys that the server used to sign messages with but won't use
        # to sign new messages. E.g. it has lost its private key
        old_signing_keys: {}
        #
        #old_signing_keys:
        #  "ed25519:auto":
        #    # Base64 encoded public key
        #    key: "The public part of your old signing key."

@@ -111,9 +116,11 @@ class KeyConfig(Config):
        # Used to set the valid_until_ts in /key/v2 APIs.
        # Determines how quickly servers will query to check which keys
        # are still valid.
        #
        key_refresh_interval: "1d" # 1 Day.

        # The trusted servers to download signing keys from.
        #
        perspectives:
          servers:
            "matrix.org":

synapse/config/logger.py

@@ -83,6 +83,7 @@ class LoggingConfig(Config):
        log_config = os.path.join(config_dir_path, server_name + ".log.config")
        return """
        # A yaml python logging config file
        #
        log_config: "%(log_config)s"
        """ % locals()

@@ -242,3 +243,5 @@ def setup_logging(config, use_worker_options=False):
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")

synapse/config/metrics.py

@@ -13,7 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError

MISSING_SENTRY = (
    """Missing sentry-sdk library. This is required to enable sentry
    integration.
    """
)


class MetricsConfig(Config):

@@ -23,12 +29,38 @@ class MetricsConfig(Config):
        self.metrics_port = config.get("metrics_port")
        self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")

        self.sentry_enabled = "sentry" in config
        if self.sentry_enabled:
            try:
                import sentry_sdk  # noqa F401
            except ImportError:
                raise ConfigError(MISSING_SENTRY)

            self.sentry_dsn = config["sentry"].get("dsn")
            if not self.sentry_dsn:
                raise ConfigError(
                    "sentry.dsn field is required when sentry integration is enabled",
                )

    def default_config(self, report_stats=None, **kwargs):
        res = """\
        ## Metrics ###

        # Enable collection and rendering of performance metrics
        #
        enable_metrics: False

        # Enable sentry integration
        # NOTE: While attempts are made to ensure that the logs don't contain
        # any sensitive information, this cannot be guaranteed. By enabling
        # this option the sentry server may therefore receive sensitive
        # information, and it in turn may then diseminate sensitive information
        # through insecure notification channels if so configured.
        #
        #sentry:
        #    dsn: "..."

        # Whether or not to report anonymized homeserver usage statistics.
        """

        if report_stats is None:

synapse/config/password.py

@@ -28,6 +28,7 @@ class PasswordConfig(Config):
    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable password for login.
        #
        password_config:
           enabled: true
           # Uncomment and change to a secret random string for extra security.

synapse/config/password_auth_providers.py

@@ -52,18 +52,18 @@ class PasswordAuthProviderConfig(Config):

    def default_config(self, **kwargs):
        return """\
        # password_providers:
        #     - module: "ldap_auth_provider.LdapAuthProvider"
        #       config:
        #         enabled: true
        #         uri: "ldap://ldap.example.com:389"
        #         start_tls: true
        #         base: "ou=users,dc=example,dc=com"
        #         attributes:
        #            uid: "cn"
        #            mail: "email"
        #            name: "givenName"
        #         #bind_dn:
        #         #bind_password:
        #         #filter: "(objectClass=posixAccount)"
        #password_providers:
        #    - module: "ldap_auth_provider.LdapAuthProvider"
        #      config:
        #        enabled: true
        #        uri: "ldap://ldap.example.com:389"
        #        start_tls: true
        #        base: "ou=users,dc=example,dc=com"
        #        attributes:
        #           uid: "cn"
        #           mail: "email"
        #           name: "givenName"
        #        #bind_dn:
        #        #bind_password:
        #        #filter: "(objectClass=posixAccount)"
        """

synapse/config/push.py

@@ -51,11 +51,11 @@ class PushConfig(Config):
        # notification request includes the content of the event (other details
        # like the sender are still included). For `event_id_only` push, it
        # has no effect.

        #
        # For modern android devices the notification content will still appear
        # because it is loaded by the app. iPhone, however will send a
        # notification saying only that a message arrived and who it came from.
        #
        #push:
        #   include_content: true
        #  include_content: true
        """

synapse/config/ratelimiting.py

@@ -32,27 +32,34 @@ class RatelimitConfig(Config):
        ## Ratelimiting ##

        # Number of messages a client can send per second
        #
        rc_messages_per_second: 0.2

        # Number of message a client can send before being throttled
        #
        rc_message_burst_count: 10.0

        # The federation window size in milliseconds
        #
        federation_rc_window_size: 1000

        # The number of federation requests from a single server in a window
        # before the server will delay processing the request.
        #
        federation_rc_sleep_limit: 10

        # The duration in milliseconds to delay processing events from
        # remote servers by if they go over the sleep limit.
        #
        federation_rc_sleep_delay: 500

        # The maximum number of concurrent federation requests allowed
        # from a single server
        #
        federation_rc_reject_limit: 50

        # The number of federation requests to concurrently process from a
        # single server
        #
        federation_rc_concurrent: 3
        """

synapse/config/registration.py

@@ -70,28 +70,29 @@ class RegistrationConfig(Config):

        # The user must provide all of the below types of 3PID when registering.
        #
        # registrations_require_3pid:
        #     - email
        #     - msisdn
        #registrations_require_3pid:
        #  - email
        #  - msisdn

        # Explicitly disable asking for MSISDNs from the registration
        # flow (overrides registrations_require_3pid if MSISDNs are set as required)
        #
        # disable_msisdn_registration = True
        #disable_msisdn_registration: True

        # Mandate that users are only allowed to associate certain formats of
        # 3PIDs with accounts on this server.
        #
        # allowed_local_3pids:
        #     - medium: email
        #       pattern: '.*@matrix\\.org'
        #     - medium: email
        #       pattern: '.*@vector\\.im'
        #     - medium: msisdn
        #       pattern: '\\+44'
        #allowed_local_3pids:
        #  - medium: email
        #    pattern: '.*@matrix\\.org'
        #  - medium: email
        #    pattern: '.*@vector\\.im'
        #  - medium: msisdn
        #    pattern: '\\+44'

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        #
        %(registration_shared_secret)s

        # Set the number of bcrypt rounds used to generate password hash.

@@ -99,11 +100,13 @@ class RegistrationConfig(Config):
        # The default number is 12 (which equates to 2^12 rounds).
        # N.B. that increasing this will exponentially increase the time required
        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
        #
        bcrypt_rounds: 12

        # Allows users to register as guests without a password/email/etc, and
        # participate in rooms hosted on this server which have been made
        # accessible to anonymous users.
        #
        allow_guest_access: False

        # The identity server which we suggest that clients should use when users log

@@ -112,27 +115,30 @@ class RegistrationConfig(Config):
        # (By default, no suggestion is made, so it is left up to the client.
        # This setting is ignored unless public_baseurl is also set.)
        #
        # default_identity_server: https://matrix.org
        #default_identity_server: https://matrix.org

        # The list of identity servers trusted to verify third party
        # identifiers by this server.
        #
        # Also defines the ID server which will be called when an account is
        # deactivated (one will be picked arbitrarily).
        #
        trusted_third_party_id_servers:
            - matrix.org
            - vector.im
          - matrix.org
          - vector.im

        # Users who register on this homeserver will automatically be joined
        # to these rooms
        #
        #auto_join_rooms:
        #    - "#example:example.com"
        #  - "#example:example.com"

        # Where auto_join_rooms are specified, setting this flag ensures that the
        # the rooms exist by creating them when the first user on the
        # homeserver registers.
        # Setting to false means that if the rooms are not manually created,
        # users cannot be auto-joined since they do not exist.
        #
        autocreate_auto_join_rooms: true
        """ % locals()

@@ -180,29 +180,34 @@ class ContentRepositoryConfig(Config):
|
||||
uploads_path = os.path.join(data_dir_path, "uploads")
|
||||
return r"""
|
||||
# Directory where uploaded images and attachments are stored.
|
||||
#
|
||||
media_store_path: "%(media_store)s"
|
||||
|
||||
# Media storage providers allow media to be stored in different
|
||||
# locations.
|
||||
# media_storage_providers:
|
||||
# - module: file_system
|
||||
# # Whether to write new local files.
|
||||
# store_local: false
|
||||
# # Whether to write new remote media
|
||||
# store_remote: false
|
||||
# # Whether to block upload requests waiting for write to this
|
||||
# # provider to complete
|
||||
# store_synchronous: false
|
||||
# config:
|
||||
# directory: /mnt/some/other/directory
|
||||
#
|
||||
#media_storage_providers:
|
||||
# - module: file_system
|
||||
# # Whether to write new local files.
|
||||
# store_local: false
|
||||
# # Whether to write new remote media
|
||||
# store_remote: false
|
||||
# # Whether to block upload requests waiting for write to this
|
||||
# # provider to complete
|
||||
# store_synchronous: false
|
||||
# config:
|
||||
# directory: /mnt/some/other/directory
|
||||
|
||||
# Directory where in-progress uploads are stored.
|
||||
#
|
||||
uploads_path: "%(uploads_path)s"
|
||||
|
||||
# The largest allowed upload size in bytes
|
||||
#
|
||||
max_upload_size: "10M"
|
||||
|
||||
# Maximum number of pixels that will be thumbnailed
|
||||
#
|
||||
max_image_pixels: "32M"
|
||||
|
||||
# Whether to generate new thumbnails on the fly to precisely match
|
||||
@@ -210,9 +215,11 @@ class ContentRepositoryConfig(Config):
|
||||
# a new resolution is requested by the client the server will
|
||||
# generate a new thumbnail. If false the server will pick a thumbnail
|
||||
# from a precalculated list.
|
||||
#
|
||||
dynamic_thumbnails: false
|
||||
|
||||
# List of thumbnail to precalculate when an image is uploaded.
|
||||
# List of thumbnails to precalculate when an image is uploaded.
|
||||
#
|
||||
thumbnail_sizes:
|
||||
- width: 32
|
||||
height: 32
|
||||
@@ -233,6 +240,7 @@ class ContentRepositoryConfig(Config):
|
||||
# Is the preview URL API enabled? If enabled, you *must* specify
|
||||
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
|
||||
# denied from accessing.
|
||||
#
|
||||
url_preview_enabled: False
|
||||
|
||||
# List of IP address CIDR ranges that the URL preview spider is denied
|
||||
@@ -243,16 +251,16 @@ class ContentRepositoryConfig(Config):
|
||||
# synapse to issue arbitrary GET requests to your internal services,
|
||||
# causing serious security issues.
|
||||
#
|
||||
# url_preview_ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
# - '172.16.0.0/12'
|
||||
# - '192.168.0.0/16'
|
||||
# - '100.64.0.0/10'
|
||||
# - '169.254.0.0/16'
|
||||
# - '::1/128'
|
||||
# - 'fe80::/64'
|
||||
# - 'fc00::/7'
|
||||
#url_preview_ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
# - '172.16.0.0/12'
|
||||
# - '192.168.0.0/16'
|
||||
# - '100.64.0.0/10'
|
||||
# - '169.254.0.0/16'
|
||||
# - '::1/128'
|
||||
# - 'fe80::/64'
|
||||
# - 'fc00::/7'
|
||||
#
|
||||
# List of IP address CIDR ranges that the URL preview spider is allowed
|
||||
# to access even if they are specified in url_preview_ip_range_blacklist.
|
||||
@@ -260,8 +268,8 @@ class ContentRepositoryConfig(Config):
|
||||
# target IP ranges - e.g. for enabling URL previews for a specific private
|
||||
# website only visible in your network.
|
||||
#
|
||||
# url_preview_ip_range_whitelist:
|
||||
# - '192.168.1.1'
|
||||
#url_preview_ip_range_whitelist:
|
||||
# - '192.168.1.1'
|
||||
|
||||
# Optional list of URL matches that the URL preview spider is
|
||||
# denied from accessing. You should use url_preview_ip_range_blacklist
|
||||
@@ -279,26 +287,25 @@ class ContentRepositoryConfig(Config):
|
||||
# specified component matches for a given list item succeed, the URL is
|
||||
# blacklisted.
|
||||
#
|
||||
# url_preview_url_blacklist:
|
||||
# # blacklist any URL with a username in its URI
|
||||
# - username: '*'
|
||||
#url_preview_url_blacklist:
|
||||
# # blacklist any URL with a username in its URI
|
||||
# - username: '*'
|
||||
#
|
||||
# # blacklist all *.google.com URLs
|
||||
# - netloc: 'google.com'
|
||||
# - netloc: '*.google.com'
|
||||
# # blacklist all *.google.com URLs
|
||||
# - netloc: 'google.com'
|
||||
# - netloc: '*.google.com'
|
||||
#
|
||||
# # blacklist all plain HTTP URLs
|
||||
# - scheme: 'http'
|
||||
# # blacklist all plain HTTP URLs
|
||||
# - scheme: 'http'
|
||||
#
|
||||
# # blacklist http(s)://www.acme.com/foo
|
||||
# - netloc: 'www.acme.com'
|
||||
# path: '/foo'
|
||||
# # blacklist http(s)://www.acme.com/foo
|
||||
# - netloc: 'www.acme.com'
|
||||
# path: '/foo'
|
||||
#
|
||||
# # blacklist any URL with a literal IPv4 address
|
||||
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
# # blacklist any URL with a literal IPv4 address
|
||||
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
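
The matching rule described here (a URL is blacklisted when every component named in a list item matches) can be sketched with the standard library. This helper is illustrative only; Synapse's real matcher differs in details such as regex support for components:

    from fnmatch import fnmatch
    from urllib.parse import urlsplit

    def is_url_blacklisted(url, blacklist):
        parts = urlsplit(url)
        components = {
            "scheme": parts.scheme,
            "netloc": parts.netloc,
            "path": parts.path,
            "username": parts.username or "",
        }
        for rule in blacklist:
            # A list item fires only if *all* of the components it names match.
            if all(fnmatch(components.get(key, ""), pattern)
                   for key, pattern in rule.items()):
                return True
        return False

    blacklist = [{"netloc": "www.acme.com", "path": "/foo"}]
    assert is_url_blacklisted("http://www.acme.com/foo", blacklist)
    assert not is_url_blacklisted("http://www.acme.com/bar", blacklist)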

# The largest allowed URL preview spidering size in bytes
max_spider_size: "10M"


""" % locals()

@@ -20,12 +20,37 @@ from ._base import Config, ConfigError

class RoomDirectoryConfig(Config):
    def read_config(self, config):
        alias_creation_rules = config["alias_creation_rules"]
        alias_creation_rules = config.get("alias_creation_rules")

        self._alias_creation_rules = [
            _AliasRule(rule)
            for rule in alias_creation_rules
        ]
        if alias_creation_rules is not None:
            self._alias_creation_rules = [
                _RoomDirectoryRule("alias_creation_rules", rule)
                for rule in alias_creation_rules
            ]
        else:
            self._alias_creation_rules = [
                _RoomDirectoryRule(
                    "alias_creation_rules", {
                        "action": "allow",
                    }
                )
            ]

        room_list_publication_rules = config.get("room_list_publication_rules")

        if room_list_publication_rules is not None:
            self._room_list_publication_rules = [
                _RoomDirectoryRule("room_list_publication_rules", rule)
                for rule in room_list_publication_rules
            ]
        else:
            self._room_list_publication_rules = [
                _RoomDirectoryRule(
                    "room_list_publication_rules", {
                        "action": "allow",
                    }
                )
            ]

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
@@ -33,60 +58,138 @@ class RoomDirectoryConfig(Config):
# on this server.
#
# The format of this option is a list of rules that contain globs that
# match against user_id and the new alias (fully qualified with server
# name). The action in the first rule that matches is taken, which can
# currently either be "allow" or "deny".
# match against user_id, room_id and the new alias (fully qualified with
# server name). The action in the first rule that matches is taken,
# which can currently either be "allow" or "deny".
#
# If no rules match the request is denied.
alias_creation_rules:
    - user_id: "*"
      alias: "*"
      action: allow
# Missing user_id/room_id/alias fields default to "*".
#
# If no rules match the request is denied. An empty list means no one
# can create aliases.
#
# Options for the rules include:
#
#   user_id: Matches against the creator of the alias
#   alias: Matches against the alias being created
#   room_id: Matches against the room ID the alias is being pointed at
#   action: Whether to "allow" or "deny" the request if the rule matches
#
# The default is:
#
#alias_creation_rules:
#  - user_id: "*"
#    alias: "*"
#    room_id: "*"
#    action: allow

# The `room_list_publication_rules` option controls who can publish and
# which rooms can be published in the public room list.
#
# The format of this option is the same as that for
# `alias_creation_rules`.
#
# If the room has one or more aliases associated with it, only one of
# the aliases needs to match the alias rule. If there are no aliases
# then only rules with `alias: *` match.
#
# If no rules match the request is denied. An empty list means no one
# can publish rooms.
#
# Options for the rules include:
#
#   user_id: Matches against the creator of the alias
#   room_id: Matches against the room ID being published
#   alias: Matches against any current local or canonical aliases
#     associated with the room
#   action: Whether to "allow" or "deny" the request if the rule matches
#
# The default is:
#
#room_list_publication_rules:
#  - user_id: "*"
#    alias: "*"
#    room_id: "*"
#    action: allow
"""

    def is_alias_creation_allowed(self, user_id, alias):
    def is_alias_creation_allowed(self, user_id, room_id, alias):
        """Checks if the given user is allowed to create the given alias

        Args:
            user_id (str)
            room_id (str)
            alias (str)

        Returns:
            boolean: True if user is allowed to create the alias
        """
        for rule in self._alias_creation_rules:
            if rule.matches(user_id, alias):
            if rule.matches(user_id, room_id, [alias]):
                return rule.action == "allow"

        return False

    def is_publishing_room_allowed(self, user_id, room_id, aliases):
        """Checks if the given user is allowed to publish the room

        Args:
            user_id (str)
            room_id (str)
            aliases (list[str]): any local aliases associated with the room

        Returns:
            boolean: True if user can publish room
        """
        for rule in self._room_list_publication_rules:
            if rule.matches(user_id, room_id, aliases):
                return rule.action == "allow"

        return False


class _AliasRule(object):
    def __init__(self, rule):
class _RoomDirectoryRule(object):
    """Helper class to test whether a room directory action is allowed, like
    creating an alias or publishing a room.
    """

    def __init__(self, option_name, rule):
        """
        Args:
            option_name (str): Name of the config option this rule belongs to
            rule (dict): The rule as specified in the config
        """

        action = rule["action"]
        user_id = rule["user_id"]
        alias = rule["alias"]
        user_id = rule.get("user_id", "*")
        room_id = rule.get("room_id", "*")
        alias = rule.get("alias", "*")

        if action in ("allow", "deny"):
            self.action = action
        else:
            raise ConfigError(
                "alias_creation_rules rules can only have action of 'allow'"
                " or 'deny'"
                "%s rules can only have action of 'allow'"
                " or 'deny'" % (option_name,)
            )

        self._alias_matches_all = alias == "*"

        try:
            self._user_id_regex = glob_to_regex(user_id)
            self._alias_regex = glob_to_regex(alias)
            self._room_id_regex = glob_to_regex(room_id)
        except Exception as e:
            raise ConfigError("Failed to parse glob into regex: %s", e)
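
`glob_to_regex` converts a `*`/`?` glob into an anchored regular expression. The real helper lives in `synapse.util`; a rough equivalent, assuming only the usual glob semantics:

    import re

    def glob_to_regex(glob):
        # Translate '*' -> '.*' and '?' -> '.', escape everything else, and
        # anchor the result so the whole string must match.
        out = []
        for c in glob:
            if c == "*":
                out.append(".*")
            elif c == "?":
                out.append(".")
            else:
                out.append(re.escape(c))
        return re.compile(r"\A" + "".join(out) + r"\Z")

    assert glob_to_regex("@*:example.com").match("@bob:example.com")
    assert not glob_to_regex("#spam-?:example.com").match("#spam-10:example.com")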

    def matches(self, user_id, alias):
        """Tests if this rule matches the given user_id and alias.
    def matches(self, user_id, room_id, aliases):
        """Tests if this rule matches the given user_id, room_id and aliases.

        Args:
            user_id (str)
            alias (str)
            room_id (str)
            aliases (list[str]): The associated aliases to the room. Will be a
                single element for testing alias creation, and can be empty for
                testing room publishing.

        Returns:
            boolean
@@ -96,7 +199,22 @@ class _AliasRule(object):
        if not self._user_id_regex.match(user_id):
            return False

        if not self._alias_regex.match(alias):
        if not self._room_id_regex.match(room_id):
            return False

        return True
        # We only have alias checks left, so we can short circuit if the alias
        # rule matches everything.
        if self._alias_matches_all:
            return True

        # If we are not given any aliases then this rule only matches if the
        # alias glob matches all aliases, which we checked above.
        if not aliases:
            return False

        # Otherwise, we just need one alias to match
        for alias in aliases:
            if self._alias_regex.match(alias):
                return True

        return False
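
Putting the pieces together, a rule built from a config-style dict can be tested directly against a proposed publication. The identifiers below are hypothetical:

    rule = _RoomDirectoryRule("room_list_publication_rules", {
        "user_id": "@*:example.com",   # any user on example.com
        "alias": "#public-*",          # at least one alias must match this glob
        "action": "allow",             # room_id is omitted, so defaults to "*"
    })

    # One matching alias out of several is enough to satisfy the alias check.
    assert rule.matches(
        "@bob:example.com", "!abcdefg:example.com",
        ["#private:example.com", "#public-chat:example.com"],
    )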

@@ -67,44 +67,43 @@ class SAML2Config(Config):
        return """
# Enable SAML2 for registration and login. Uses pysaml2.
#
# saml2_config:
# `sp_config` is the configuration for the pysaml2 Service Provider.
# See pysaml2 docs for format of config.
#
#   # The following is the configuration for the pysaml2 Service Provider.
#   # See pysaml2 docs for format of config.
#   #
#   # Default values will be used for the 'entityid' and 'service' settings,
#   # so it is not normally necessary to specify them unless you need to
#   # override them.
# Default values will be used for the 'entityid' and 'service' settings,
# so it is not normally necessary to specify them unless you need to
# override them.
#
#   sp_config:
#     # point this to the IdP's metadata. You can use either a local file or
#     # (preferably) a URL.
#     metadata:
#       # local: ["saml2/idp.xml"]
#       remote:
#         - url: https://our_idp/metadata.xml
#saml2_config:
#  sp_config:
#    # point this to the IdP's metadata. You can use either a local file or
#    # (preferably) a URL.
#    metadata:
#      #local: ["saml2/idp.xml"]
#      remote:
#        - url: https://our_idp/metadata.xml
#
#     # The following is just used to generate our metadata xml, and you
#     # may well not need it, depending on your setup. Alternatively you
#     # may need a whole lot more detail - see the pysaml2 docs!
#    # The rest of sp_config is just used to generate our metadata xml, and you
#    # may well not need it, depending on your setup. Alternatively you
#    # may need a whole lot more detail - see the pysaml2 docs!
#
#     description: ["My awesome SP", "en"]
#     name: ["Test SP", "en"]
#    description: ["My awesome SP", "en"]
#    name: ["Test SP", "en"]
#
#     organization:
#       name: Example com
#       display_name:
#         - ["Example co", "en"]
#       url: "http://example.com"
#    organization:
#      name: Example com
#      display_name:
#        - ["Example co", "en"]
#      url: "http://example.com"
#
#     contact_person:
#       - given_name: Bob
#         sur_name: "the Sysadmin"
#         email_address": ["admin@example.com"]
#         contact_type": technical
#    contact_person:
#      - given_name: Bob
#        sur_name: "the Sysadmin"
#        email_address": ["admin@example.com"]
#        contact_type": technical
#
#     # Instead of putting the config inline as above, you can specify a
#     # separate pysaml2 configuration file:
#     #
#     # config_path: "%(config_dir_path)s/sp_conf.py"
#  # Instead of putting the config inline as above, you can specify a
#  # separate pysaml2 configuration file:
#  #
#  config_path: "%(config_dir_path)s/sp_conf.py"
""" % {"config_dir_path": config_dir_path}

@@ -286,19 +286,20 @@ class ServerConfig(Config):
#
# This setting requires the affinity package to be installed!
#
# cpu_affinity: 0xFFFFFFFF
#cpu_affinity: 0xFFFFFFFF

# The path to the web client which will be served at /_matrix/client/
# if 'webclient' is configured under the 'listeners' configuration.
#
# web_client_location: "/path/to/web/root"
#web_client_location: "/path/to/web/root"

# The public-facing base URL that clients use to access this HS
# (not including _matrix/...). This is the same URL a user would
# enter into the 'custom HS URL' field on their client. If you
# use synapse with a reverse proxy, this should be the URL to reach
# synapse via the proxy.
# public_baseurl: https://example.com/
#
#public_baseurl: https://example.com/

# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
@@ -309,15 +310,25 @@ class ServerConfig(Config):
use_presence: true

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
# gc_thresholds: [700, 10, 10]
#
#gc_thresholds: [700, 10, 10]

# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is -1, which means no upper limit.
# filter_timeline_limit: 5000
#
#filter_timeline_limit: 5000

# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
# block_non_admin_invites: True
#
#block_non_admin_invites: True

# Room searching
#
# If disabled, new messages will not be indexed for searching and users
# will receive errors when searching for messages. Defaults to enabled.
#
#enable_search: false

# Restrict federation to the following whitelist of domains.
# N.B. we recommend also firewalling your federation listener to limit
@@ -325,7 +336,7 @@ class ServerConfig(Config):
# purely on this application-layer restriction. If not specified, the
# default is to whitelist everything.
#
# federation_domain_whitelist:
#federation_domain_whitelist:
#  - lon.example.com
#  - nyc.example.com
#  - syd.example.com
@@ -397,11 +408,11 @@ class ServerConfig(Config):
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
# - port: %(bind_port)s
#   type: http
#   tls: true
#   resources:
#     - names: [client, federation]
#- port: %(bind_port)s
#  type: http
#  tls: true
#  resources:
#    - names: [client, federation]

# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
@@ -421,52 +432,49 @@ class ServerConfig(Config):

# example additional_resources:
#
# additional_resources:
#   "/_matrix/my/custom/endpoint":
#     module: my_module.CustomRequestHandler
#     config: {}
#additional_resources:
#  "/_matrix/my/custom/endpoint":
#    module: my_module.CustomRequestHandler
#    config: {}

# Turn on the twisted ssh manhole service on localhost on the given
# port.
# - port: 9000
#   bind_addresses: ['::1', '127.0.0.1']
#   type: manhole
#
#- port: 9000
#  bind_addresses: ['::1', '127.0.0.1']
#  type: manhole


## Homeserver blocking ##

# Homeserver blocking
#
# How to reach the server admin, used in ResourceLimitError
# admin_contact: 'mailto:admin@server.com'
#
# Global block config
#
# hs_disabled: False
# hs_disabled_message: 'Human readable reason for why the HS is blocked'
# hs_disabled_limit_type: 'error code(str), to help clients decode reason'
#admin_contact: 'mailto:admin@server.com'

# Global blocking
#
#hs_disabled: False
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
#hs_disabled_limit_type: 'error code(str), to help clients decode reason'

# Monthly Active User Blocking
#
# Enables monthly active user checking
# limit_usage_by_mau: False
# max_mau_value: 50
# mau_trial_days: 2
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2

# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
# is true, this is implied to be true.
# mau_stats_only: False
#
#mau_stats_only: False

# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
#
# mau_limit_reserved_threepids:
# - medium: 'email'
#   address: 'reserved_user@example.com'
#
# Room searching
#
# If disabled, new messages will not be indexed for searching and users
# will receive errors when searching for messages. Defaults to enabled.
# enable_search: true
#mau_limit_reserved_threepids:
#  - medium: 'email'
#    address: 'reserved_user@example.com'
""" % locals()

    def read_arguments(self, args):

@@ -30,11 +30,11 @@ DEFAULT_CONFIG = """\
# It's also possible to override the room name, the display name of the
# "notices" user, and the avatar for the user.
#
# server_notices:
#   system_mxid_localpart: notices
#   system_mxid_display_name: "Server Notices"
#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#   room_name: "Server Notices"
#server_notices:
#  system_mxid_localpart: notices
#  system_mxid_display_name: "Server Notices"
#  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#  room_name: "Server Notices"
"""


@@ -28,8 +28,8 @@ class SpamCheckerConfig(Config):

    def default_config(self, **kwargs):
        return """\
# spam_checker:
#   module: "my_custom_project.SuperSpamChecker"
#   config:
#     example_option: 'things'
#spam_checker:
#  module: "my_custom_project.SuperSpamChecker"
#  config:
#    example_option: 'things'
"""

@@ -19,6 +19,8 @@ import warnings
from datetime import datetime
from hashlib import sha256

import six

from unpaddedbase64 import encode_base64

from OpenSSL import crypto
@@ -36,12 +38,15 @@ class TlsConfig(Config):
            acme_config = {}

        self.acme_enabled = acme_config.get("enabled", False)
        self.acme_url = acme_config.get(

        # hyperlink complains on py2 if this is not a Unicode
        self.acme_url = six.text_type(acme_config.get(
            "url", u"https://acme-v01.api.letsencrypt.org/directory"
        )
        ))
        self.acme_port = acme_config.get("port", 80)
        self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
        self.acme_domain = acme_config.get("domain", config.get("server_name"))

        self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
        self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
@@ -54,7 +59,7 @@ class TlsConfig(Config):
            )
        if not self.tls_private_key_file:
            raise ConfigError(
                "tls_certificate_path must be specified if TLS-enabled listeners are "
                "tls_private_key_path must be specified if TLS-enabled listeners are "
                "configured."
            )

@@ -176,10 +181,11 @@ class TlsConfig(Config):
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
# tls_certificate_path: "%(tls_certificate_path)s"
#tls_certificate_path: "%(tls_certificate_path)s"

# PEM-encoded private key for TLS
# tls_private_key_path: "%(tls_private_key_path)s"
#
#tls_private_key_path: "%(tls_private_key_path)s"

# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
@@ -206,28 +212,42 @@ class TlsConfig(Config):
# ACME support is disabled by default. Uncomment the following line
# (and tls_certificate_path and tls_private_key_path above) to enable it.
#
# enabled: true
#enabled: true

# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
#     https://acme-staging.api.letsencrypt.org/directory
#
# url: https://acme-v01.api.letsencrypt.org/directory
#url: https://acme-v01.api.letsencrypt.org/directory

# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
# port: 80
#port: 80

# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
# bind_addresses: ['::', '0.0.0.0']
#bind_addresses: ['::', '0.0.0.0']

# How many days remaining on a certificate before it is renewed.
#
# reprovision_threshold: 30
#reprovision_threshold: 30

# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
# by putting a file at 'https://<server_name>/.well-known/matrix/server',
# you can delegate incoming traffic to another server. If you do that,
# you should give the target of the delegation here.
#
# For example: if your 'server_name' is 'example.com', but
# 'https://example.com/.well-known/matrix/server' delegates to
# 'matrix.example.com', you should put 'matrix.example.com' here.
#
# If not set, defaults to your 'server_name'.
#
#domain: matrix.example.com
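
For reference, the `.well-known` delegation mentioned here works by serving a small JSON document over HTTPS from the base domain. A minimal example of the response body (the target hostname is illustrative):

    # GET https://example.com/.well-known/matrix/server
    {
        "m.server": "matrix.example.com:443"
    }

With that file in place, other homeservers send federation traffic for `example.com` to `matrix.example.com`, which is why the certificate should then cover the delegation target.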

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
@@ -254,8 +274,7 @@ class TlsConfig(Config):
#   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
tls_fingerprints: []
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]

"""
            % locals()

@@ -40,5 +40,5 @@ class UserDirectoryConfig(Config):
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
#  search_all_users: false
#  search_all_users: false
"""

@@ -27,20 +27,24 @@ class VoipConfig(Config):

    def default_config(self, **kwargs):
        return """\
## Turn ##
## TURN ##

# The public URIs of the TURN server to give to clients
#
#turn_uris: []

# The shared secret used to compute passwords for the TURN server
#
#turn_shared_secret: "YOUR_SHARED_SECRET"

# The username and password if the TURN server needs them and
# does not use a token
#
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"

# How long generated TURN credentials last
#
turn_user_lifetime: "1h"

# Whether guests should be allowed to use the TURN server.
@@ -48,5 +52,6 @@ class VoipConfig(Config):
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
#
turn_allow_guests: True
"""

@@ -1,4 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from zope.interface import implementer
@@ -105,9 +107,7 @@ class ClientTLSOptions(object):
        self._hostnameBytes = _idnaBytes(hostname)
        self._sendSNI = True

        ctx.set_info_callback(
            _tolerateErrors(self._identityVerifyingInfoCallback)
        )
        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))

    def clientConnectionForTLS(self, tlsProtocol):
        context = self._ctx
@@ -128,10 +128,8 @@ class ClientTLSOptionsFactory(object):

    def __init__(self, config):
        # We don't use config options yet
        pass
        self._options = CertificateOptions(verify=False)

    def get_options(self, host):
        return ClientTLSOptions(
            host,
            CertificateOptions(verify=False).getContext()
        )
        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
        return ClientTLSOptions(host, self._options._makeContext())

@@ -17,6 +17,7 @@
import logging
from collections import namedtuple

from six import raise_from
from six.moves import urllib

from signedjson.key import (
@@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64

from twisted.internet import defer

from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import (
    Codes,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
    LoggingContext,
@@ -44,6 +50,7 @@ from synapse.util.logcontext import (
    run_in_background,
)
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)

@@ -367,13 +374,18 @@ class Keyring(object):
                    server_name_and_key_ids, perspective_name, perspective_keys
                )
                defer.returnValue(result)
            except KeyLookupError as e:
                logger.warning(
                    "Key lookup failed from %r: %s", perspective_name, e,
                )
            except Exception as e:
                logger.exception(
                    "Unable to get key from %r: %s %s",
                    perspective_name,
                    type(e).__name__, str(e),
                )
                defer.returnValue({})

            defer.returnValue({})

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
@@ -421,21 +433,30 @@ class Keyring(object):
        # TODO(mark): Set the minimum_valid_until_ts to that needed by
        # the events being validated or the current time if validating
        # an incoming request.
        query_response = yield self.client.post_json(
            destination=perspective_name,
            path="/_matrix/key/v2/query",
            data={
                u"server_keys": {
                    server_name: {
                        key_id: {
                            u"minimum_valid_until_ts": 0
                        } for key_id in key_ids
        try:
            query_response = yield self.client.post_json(
                destination=perspective_name,
                path="/_matrix/key/v2/query",
                data={
                    u"server_keys": {
                        server_name: {
                            key_id: {
                                u"minimum_valid_until_ts": 0
                            } for key_id in key_ids
                    }
                    for server_name, key_ids in server_names_and_key_ids
                }
                        }
                        for server_name, key_ids in server_names_and_key_ids
                    }
            },
            long_retries=True,
        )
                },
                long_retries=True,
            )
        except (NotRetryingDestination, RequestSendFailed) as e:
            raise_from(
                KeyLookupError("Failed to connect to remote server"), e,
            )
        except HttpResponseException as e:
            raise_from(
                KeyLookupError("Remote server returned an error"), e,
            )
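
The pattern being introduced here wraps transport-level failures in a domain-specific `KeyLookupError` while chaining the original exception via `six.raise_from`. A minimal, self-contained sketch of the same pattern (the helper names and `ConnectionError` stand-in are illustrative):

    from six import raise_from

    class KeyLookupError(Exception):
        """Domain-specific error that key-fetching callers know how to handle."""

    def do_lookup(transport_call):
        try:
            return transport_call()
        except ConnectionError as e:
            # On py3 raise_from sets __cause__, so the transport failure stays
            # visible in tracebacks without leaking its type to callers.
            raise_from(KeyLookupError("Failed to connect to remote server"), e)

    def failing_call():
        raise ConnectionError("connection refused")

    try:
        do_lookup(failing_call)
    except KeyLookupError as e:
        print("lookup failed: %s (caused by %r)" % (e, getattr(e, "__cause__", None)))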

        keys = {}

@@ -502,11 +523,20 @@ class Keyring(object):
            if requested_key_id in keys:
                continue

            response = yield self.client.get_json(
                destination=server_name,
                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                ignore_backoff=True,
            )
            try:
                response = yield self.client.get_json(
                    destination=server_name,
                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                    ignore_backoff=True,
                )
            except (NotRetryingDestination, RequestSendFailed) as e:
                raise_from(
                    KeyLookupError("Failed to connect to remote server"), e,
                )
            except HttpResponseException as e:
                raise_from(
                    KeyLookupError("Remote server returned an error"), e,
                )

            if (u"signatures" not in response
                    or server_name not in response[u"signatures"]):
@@ -656,7 +686,7 @@ def _handle_key_deferred(verify_request):
    try:
        with PreserveLoggingContext():
            _, key_id, verify_key = yield verify_request.deferred
    except IOError as e:
    except (IOError, RequestSendFailed) as e:
        logger.warn(
            "Got IOError when downloading keys for %s: %s %s",
            server_name, type(e).__name__, str(e),

@@ -33,6 +33,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import (
    CodeMessageException,
    Codes,
    FederationDeniedError,
    HttpResponseException,
    SynapseError,
@@ -792,10 +793,25 @@ class FederationClient(FederationBase):
            defer.returnValue(content)
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, we
                # assume that the remote understands the v2 invite API and this
                # is a legitimate error.
                if err.errcode != Codes.UNKNOWN:
                    raise err

                # Otherwise, we assume that the remote server doesn't understand
                # the v2 invite API.

                if room_version in (RoomVersions.V1, RoomVersions.V2):
                    pass  # We'll fall through
                else:
                    raise Exception("Remote server is too old")
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",
                        Codes.UNSUPPORTED_ROOM_VERSION,
                    )
            elif e.code == 403:
                raise e.to_synapse_error()
            else:
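
The decision encoded above can be restated in isolation: fall back to the old v1 invite path only when the remote's error looks like "unrecognised request" and the room version is old enough for v1 to work. A toy restatement (plain strings stand in for Synapse's `Codes` and `RoomVersions` constants):

    M_UNKNOWN = "M_UNKNOWN"
    V1, V2 = "1", "2"

    def should_fall_back_to_v1_invite(errcode, room_version):
        if errcode != M_UNKNOWN:
            # The remote understood the v2 API; its error is legitimate.
            return False
        # The remote probably predates v2; v1 only works for old room versions.
        return room_version in (V1, V2)

    assert should_fall_back_to_v1_invite("M_UNKNOWN", V1)
    assert not should_fall_back_to_v1_invite("M_FORBIDDEN", V1)
    assert not should_fall_back_to_v1_invite("M_UNKNOWN", "3")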

@@ -25,9 +25,10 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    FederationError,
    IncompatibleRoomVersionError,
    NotFoundError,
@@ -239,8 +240,9 @@ class FederationServer(FederationBase):
                f = failure.Failure()
                pdu_results[event_id] = {"error": str(e)}
                logger.error(
                    "Failed to handle PDU %s: %s",
                    event_id, f.getTraceback().rstrip(),
                    "Failed to handle PDU %s",
                    event_id,
                    exc_info=(f.type, f.value, f.getTracebackObject()),
                )
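
Passing `exc_info=(type, value, traceback)` lets the logging framework render the traceback itself, instead of baking a pre-formatted string into the message. A standalone sketch of the same pattern with a twisted `Failure` (the event ID is a placeholder):

    import logging

    from twisted.python import failure

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger(__name__)

    try:
        1 / 0
    except ZeroDivisionError:
        f = failure.Failure()  # captures the in-flight exception
        # Handing the (type, value, traceback) triple to logging lets
        # formatters and aggregators treat it as a real traceback rather
        # than opaque pre-rendered text.
        logger.error(
            "Failed to handle PDU %s", "$event_id",
            exc_info=(f.type, f.value, f.getTracebackObject()),
        )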

        yield concurrently_execute(

@@ -386,6 +388,13 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content, room_version):
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(content, format_ver)

@@ -33,7 +33,6 @@ from synapse.metrics import (
    event_processing_loop_counter,
    event_processing_loop_room_count,
    events_processed_counter,
    sent_edus_counter,
    sent_transactions_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -47,10 +46,24 @@ from .units import Edu, Transaction
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count", ""
    "synapse_federation_client_sent_pdu_destinations:count",
    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total", ""
    "Total number of PDUs queued for sending across all destinations",
)

sent_edus_counter = Counter(
    "synapse_federation_client_sent_edus",
    "Total number of EDUs successfully sent",
)

sent_edus_by_type = Counter(
    "synapse_federation_client_sent_edus_by_type",
    "Number of sent EDUs successfully sent, by event type",
    ["type"],
)
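
These counters use the standard `prometheus_client` API; a labelled counter keeps one time series per label value. A standalone sketch (the metric name here is made up to avoid colliding with the real ones):

    from prometheus_client import Counter

    example_sent_edus_by_type = Counter(
        "example_sent_edus_by_type",
        "Number of EDUs successfully sent, by event type",
        ["type"],
    )

    # .labels() selects the child series for one label value; .inc() bumps it.
    example_sent_edus_by_type.labels("m.receipt").inc()
    example_sent_edus_by_type.labels("m.typing").inc(2)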


@@ -360,8 +373,6 @@ class TransactionQueue(object):
            logger.info("Not sending EDU to ourselves")
            return

        sent_edus_counter.inc()

        if key:
            self.pending_edus_keyed_by_dest.setdefault(
                destination, {}
@@ -496,6 +507,9 @@ class TransactionQueue(object):
            )
            if success:
                sent_transactions_counter.inc()
                sent_edus_counter.inc(len(pending_edus))
                for edu in pending_edus:
                    sent_edus_by_type.labels(edu.edu_type).inc()
                # Remove the acknowledged device messages from the database
                # Only bother if we actually sent some device messages
                if device_message_edus:

@@ -736,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):

        data = yield self.handler.get_local_public_room_list(
            limit, since_token,
            network_tuple=network_tuple
            network_tuple=network_tuple,
            from_federation=True,
        )
        defer.returnValue((200, data))


@@ -42,7 +42,7 @@ from signedjson.sign import sign_json

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.api.errors import RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
@@ -191,6 +191,11 @@ class GroupAttestionRenewer(object):
                yield self.store.update_attestation_renewal(
                    group_id, user_id, attestation
                )
            except RequestSendFailed as e:
                logger.warning(
                    "Failed to renew attestation of %r in %r: %s",
                    user_id, group_id, e,
                )
            except Exception:
                logger.exception("Error renewing attestation of %r in %r",
                                 user_id, group_id)

@@ -113,8 +113,7 @@ class GroupsServerHandler(object):
            room_id = room_entry["room_id"]
            joined_users = yield self.store.get_users_in_room(room_id)
            entry = yield self.room_list_handler.generate_room_entry(
                room_id, len(joined_users),
                with_alias=False, allow_private=True,
                room_id, len(joined_users), with_alias=False, allow_private=True,
            )
            entry = dict(entry)  # so we don't change whats cached
            entry.pop("room_id", None)
@@ -544,8 +543,7 @@ class GroupsServerHandler(object):

        joined_users = yield self.store.get_users_in_room(room_id)
        entry = yield self.room_list_handler.generate_room_entry(
            room_id, len(joined_users),
            with_alias=False, allow_private=True,
            room_id, len(joined_users), with_alias=False, allow_private=True,
        )

        if not entry:

@@ -17,7 +17,6 @@ from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
from .register import RegistrationHandler
from .search import SearchHandler


@@ -41,7 +40,6 @@ class Handlers(object):
    """

    def __init__(self, hs):
        self.registration_handler = RegistrationHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)

@@ -167,4 +167,4 @@ class BaseHandler(object):
                ratelimit=False,
            )
        except Exception as e:
            logger.warn("Error kicking guest user: %s" % (e,))
            logger.exception("Error kicking guest user: %s" % (e,))

@@ -56,6 +56,7 @@ class AcmeHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.reactor = hs.get_reactor()
        self._acme_domain = hs.config.acme_domain

    @defer.inlineCallbacks
    def start_listening(self):
@@ -123,15 +124,15 @@ class AcmeHandler(object):
    @defer.inlineCallbacks
    def provision_certificate(self):

        logger.warning("Reprovisioning %s", self.hs.hostname)
        logger.warning("Reprovisioning %s", self._acme_domain)

        try:
            yield self._issuer.issue_cert(self.hs.hostname)
            yield self._issuer.issue_cert(self._acme_domain)
        except Exception:
            logger.exception("Fail!")
            raise
        logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
        cert_chain = self._store.certs[self.hs.hostname]
        logger.warning("Reprovisioned %s, saving.", self._acme_domain)
        cert_chain = self._store.certs[self._acme_domain]

        try:
            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:

@@ -20,7 +20,11 @@ from twisted.internet import defer

from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.api.errors import FederationDeniedError
from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@@ -504,13 +508,13 @@ class DeviceListEduUpdater(object):
                origin = get_domain_from_id(user_id)
                try:
                    result = yield self.federation.query_user_devices(origin, user_id)
                except NotRetryingDestination:
                except (
                    NotRetryingDestination, RequestSendFailed, HttpResponseException,
                ):
                    # TODO: Remember that we are now out of sync and try again
                    # later
                    logger.warn(
                        "Failed to handle device list update for %s,"
                        " we're not retrying the remote",
                        user_id,
                        "Failed to handle device list update for %s", user_id,
                    )
                    # We abort on exceptions rather than accepting the update
                    # as otherwise synapse will 'forget' that its device list

@@ -112,7 +112,9 @@ class DirectoryHandler(BaseHandler):
                403, "This user is not permitted to create this alias",
            )

        if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
        if not self.config.is_alias_creation_allowed(
            user_id, room_id, room_alias.to_string(),
        ):
            # Lets just return a generic message, as there may be all sorts of
            # reasons why we said no. TODO: Allow configurable error messages
            # per alias creation rule?
@@ -395,9 +397,9 @@ class DirectoryHandler(BaseHandler):
            room_id (str)
            visibility (str): "public" or "private"
        """
        if not self.spam_checker.user_may_publish_room(
            requester.user.to_string(), room_id
        ):
        user_id = requester.user.to_string()

        if not self.spam_checker.user_may_publish_room(user_id, room_id):
            raise AuthError(
                403,
                "This user is not permitted to publish rooms to the room list"
@@ -415,7 +417,24 @@ class DirectoryHandler(BaseHandler):

        yield self.auth.check_can_change_room_list(room_id, requester.user)

        yield self.store.set_room_is_public(room_id, visibility == "public")
        making_public = visibility == "public"
        if making_public:
            room_aliases = yield self.store.get_aliases_for_room(room_id)
            canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
            if canonical_alias:
                room_aliases.append(canonical_alias)

            if not self.config.is_publishing_room_allowed(
                user_id, room_id, room_aliases,
            ):
                # Lets just return a generic message, as there may be all sorts of
                # reasons why we said no. TODO: Allow configurable error messages
                # per alias creation rule?
                raise SynapseError(
                    403, "Not allowed to publish room",
                )

        yield self.store.set_room_is_public(room_id, making_public)

    @defer.inlineCallbacks
    def edit_published_appservice_room_list(self, appservice_id, network_id,

@@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
            set(auth_events.keys()) | set(state_events.keys())
        )

        # We now have a chunk of events plus associated state and auth chain to
        # persist. We do the persistence in two steps:
        #  1. Auth events and state get persisted as outliers, plus the
        #     backward extremities get persisted (as non-outliers).
        #  2. The rest of the events in the chunk get persisted one by one, as
        #     each one depends on the previous event for its state.
        #
        # The important thing is that events in the chunk get persisted as
        # non-outliers, including when those events are also in the state or
        # auth chain. Caution must therefore be taken to ensure that they are
        # not accidentally marked as outliers.

        # Step 1a: persist auth events that *don't* appear in the chunk
        ev_infos = []
        for a in auth_events.values():
            if a.event_id in seen_events:
            # We only want to persist auth events as outliers that we haven't
            # seen and aren't about to persist as part of the backfilled chunk.
            if a.event_id in seen_events or a.event_id in event_map:
                continue

            a.internal_metadata.outlier = True
            ev_infos.append({
                "event": a,
@@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
                }
            })

        # Step 1b: persist the events in the chunk we fetched state for (i.e.
        # the backwards extremities) as non-outliers.
        for e_id in events_to_state:
            # For paranoia we ensure that these events are marked as
            # non-outliers
            ev = event_map[e_id]
            assert(not ev.internal_metadata.is_outlier())

            ev_infos.append({
                "event": event_map[e_id],
                "event": ev,
                "state": events_to_state[e_id],
                "auth_events": {
                    (auth_events[a_id].type, auth_events[a_id].state_key):
                        auth_events[a_id]
                    for a_id in event_map[e_id].auth_event_ids()
                    for a_id in ev.auth_event_ids()
                    if a_id in auth_events
                }
            })
@@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
            backfilled=True,
        )

        # Step 2: Persist the rest of the events in the chunk one by one
        events.sort(key=lambda e: e.depth)

        for event in events:
            if event in events_to_state:
                continue

            # For paranoia we ensure that these events are marked as
            # non-outliers
            assert(not event.internal_metadata.is_outlier())

            # We store these one at a time since each event depends on the
            # previous to work out the state.
            # TODO: We can probably do something more clever here.

@@ -20,7 +20,7 @@ from six import iteritems

from twisted.internet import defer

from synapse.api.errors import HttpResponseException, SynapseError
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import get_domain_from_id

logger = logging.getLogger(__name__)
@@ -46,13 +46,19 @@ def _create_rerouter(func_name):
        # when the remote end responds with things like 403 Not
        # In Group, we can communicate that to the client instead
        # of a 500.
        def h(failure):
        def http_response_errback(failure):
            failure.trap(HttpResponseException)
            e = failure.value
            if e.code == 403:
                raise e.to_synapse_error()
            return failure
        d.addErrback(h)

        def request_failed_errback(failure):
            failure.trap(RequestSendFailed)
            raise SynapseError(502, "Failed to contact group server")

        d.addErrback(http_response_errback)
        d.addErrback(request_failed_errback)
        return d
    return f
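
The errback chain above leans on `Failure.trap`: each errback handles exactly one exception type and passes everything else along. A minimal, self-contained sketch of the technique (the exception class and 403 translation are stand-ins for Synapse's real types):

    from twisted.internet import defer

    class HttpResponseException(Exception):
        def __init__(self, code):
            super(HttpResponseException, self).__init__("HTTP %s" % code)
            self.code = code

    def call_remote():
        # Stand-in for a federation call that fails with a 403.
        return defer.fail(HttpResponseException(403))

    def http_response_errback(failure):
        # trap() re-raises anything that is not an HttpResponseException,
        # so unrelated errors continue down the errback chain untouched.
        failure.trap(HttpResponseException)
        if failure.value.code == 403:
            raise RuntimeError("forbidden, translated for the client")
        return failure

    def report(failure):
        print("handled: %s" % failure.getErrorMessage())

    d = call_remote()
    d.addErrback(http_response_errback)
    d.addErrback(report)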


@@ -136,7 +136,11 @@ class PaginationHandler(object):
            logger.info("[purge] complete")
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
        except Exception:
            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
            f = Failure()
            logger.error(
                "[purge] failed",
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
        finally:
            self._purges_in_progress_by_room.discard(room_id)
@@ -254,7 +258,7 @@ class PaginationHandler(object):
            })

        state = None
        if event_filter and event_filter.lazy_load_members():
        if event_filter and event_filter.lazy_load_members() and len(events) > 0:
            # TODO: remove redundant members

            # FIXME: we also care about invite targets etc.

@@ -16,8 +16,8 @@ import logging

from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util import logcontext

from ._base import BaseHandler

@@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
        if is_new:
            # fire off a process in the background to send the receipt to
            # remote servers
            self._push_remotes([receipt])
            run_as_background_process(
                'push_receipts_to_remotes', self._push_remotes, receipt
            )
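
`run_as_background_process` starts the coroutine immediately in its own logging context and tracks it in the background-process metrics; the caller deliberately does not wait on the result. A very rough, runnable stand-in for the calling convention (not Synapse's implementation):

    from twisted.internet import defer

    def run_as_background_process(desc, func, *args):
        # Rough stand-in for Synapse's helper: start func immediately, give
        # it its own error handling, and return the Deferred without
        # requiring the caller to wait on it.
        def report(f):
            print("background process %r failed: %s" % (desc, f.getErrorMessage()))
        d = defer.maybeDeferred(func, *args)
        d.addErrback(report)
        return d

    def push_remotes(receipt):
        print("sending receipt for %s" % (receipt["event_ids"],))

    # The caller fires and forgets: request handling is not blocked on
    # slow remote servers.
    run_as_background_process("push_receipts_to_remotes", push_remotes,
                              {"event_ids": ["$ev1"]})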
|
||||
@defer.inlineCallbacks
|
||||
def _received_remote_receipt(self, origin, content):
|
||||
@@ -125,44 +127,42 @@ class ReceiptsHandler(BaseHandler):
|
||||
|
||||
defer.returnValue(True)
|
||||
|
||||
@logcontext.preserve_fn # caller should not yield on this
|
||||
@defer.inlineCallbacks
|
||||
def _push_remotes(self, receipts):
|
||||
"""Given a list of receipts, works out which remote servers should be
|
||||
def _push_remotes(self, receipt):
|
||||
"""Given a receipt, works out which remote servers should be
|
||||
poked and pokes them.
|
||||
"""
|
||||
try:
|
||||
# TODO: Some of this stuff should be coallesced.
|
||||
for receipt in receipts:
|
||||
room_id = receipt["room_id"]
|
||||
receipt_type = receipt["receipt_type"]
|
||||
user_id = receipt["user_id"]
|
||||
event_ids = receipt["event_ids"]
|
||||
data = receipt["data"]
|
||||
# TODO: optimise this to move some of the work to the workers.
|
||||
room_id = receipt["room_id"]
|
||||
receipt_type = receipt["receipt_type"]
|
||||
user_id = receipt["user_id"]
|
||||
event_ids = receipt["event_ids"]
|
||||
data = receipt["data"]
|
||||
|
||||
users = yield self.state.get_current_user_in_room(room_id)
|
||||
remotedomains = set(get_domain_from_id(u) for u in users)
|
||||
remotedomains = remotedomains.copy()
|
||||
remotedomains.discard(self.server_name)
|
||||
users = yield self.state.get_current_user_in_room(room_id)
|
||||
remotedomains = set(get_domain_from_id(u) for u in users)
|
||||
remotedomains = remotedomains.copy()
|
||||
remotedomains.discard(self.server_name)
|
||||
|
||||
logger.debug("Sending receipt to: %r", remotedomains)
|
||||
logger.debug("Sending receipt to: %r", remotedomains)
|
||||
|
||||
for domain in remotedomains:
|
||||
self.federation.send_edu(
|
||||
destination=domain,
|
||||
edu_type="m.receipt",
|
||||
content={
|
||||
room_id: {
|
||||
receipt_type: {
|
||||
user_id: {
|
||||
"event_ids": event_ids,
|
||||
"data": data,
|
||||
}
|
||||
for domain in remotedomains:
|
||||
self.federation.send_edu(
|
||||
destination=domain,
|
||||
edu_type="m.receipt",
|
||||
content={
|
||||
room_id: {
|
||||
receipt_type: {
|
||||
user_id: {
|
||||
"event_ids": event_ids,
|
||||
"data": data,
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
key=(room_id, receipt_type, user_id),
|
||||
)
|
||||
},
|
||||
key=(room_id, receipt_type, user_id),
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Error pushing receipts to remote servers")
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import logging

 from twisted.internet import defer

+from synapse import types
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -26,7 +27,14 @@ from synapse.api.errors import (
     RegistrationError,
     SynapseError,
 )
+from synapse.config.server import is_threepid_reserved
 from synapse.http.client import CaptchaServerHttpClient
+from synapse.http.servlet import assert_params_in_dict
+from synapse.replication.http.login import RegisterDeviceReplicationServlet
+from synapse.replication.http.register import (
+    ReplicationPostRegisterActionsServlet,
+    ReplicationRegisterServlet,
+)
 from synapse.types import RoomAlias, RoomID, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
 from synapse.util.threepids import check_3pid_allowed
@@ -51,6 +59,7 @@ class RegistrationHandler(BaseHandler):
         self.profile_handler = hs.get_profile_handler()
         self.user_directory_handler = hs.get_user_directory_handler()
         self.captcha_client = CaptchaServerHttpClient(hs)
+        self.identity_handler = self.hs.get_handlers().identity_handler

         self._next_generated_user_id = None

@@ -61,6 +70,18 @@ class RegistrationHandler(BaseHandler):
         )
         self._server_notices_mxid = hs.config.server_notices_mxid

+        if hs.config.worker_app:
+            self._register_client = ReplicationRegisterServlet.make_client(hs)
+            self._register_device_client = (
+                RegisterDeviceReplicationServlet.make_client(hs)
+            )
+            self._post_registration_client = (
+                ReplicationPostRegisterActionsServlet.make_client(hs)
+            )
+        else:
+            self.device_handler = hs.get_device_handler()
+            self.pusher_pool = hs.get_pusherpool()
+
     @defer.inlineCallbacks
     def check_username(self, localpart, guest_access_token=None,
                        assigned_user_id=None):
@@ -155,7 +176,7 @@ class RegistrationHandler(BaseHandler):
         yield self.auth.check_auth_blocking(threepid=threepid)
         password_hash = None
         if password:
-            password_hash = yield self.auth_handler().hash(password)
+            password_hash = yield self._auth_handler.hash(password)

         if localpart:
             yield self.check_username(localpart, guest_access_token=guest_access_token)
@@ -185,7 +206,7 @@ class RegistrationHandler(BaseHandler):
             token = None
             if generate_token:
                 token = self.macaroon_gen.generate_access_token(user_id)
-            yield self.store.register(
+            yield self._register_with_store(
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
@@ -217,7 +238,7 @@ class RegistrationHandler(BaseHandler):
                 if default_display_name is None:
                     default_display_name = localpart
                 try:
-                    yield self.store.register(
+                    yield self._register_with_store(
                         user_id=user_id,
                         token=token,
                         password_hash=password_hash,
@@ -316,7 +337,7 @@ class RegistrationHandler(BaseHandler):
             user_id, allowed_appservice=service
         )

-        yield self.store.register(
+        yield self._register_with_store(
             user_id=user_id,
             password_hash="",
             appservice_id=service_id,
@@ -359,8 +380,7 @@ class RegistrationHandler(BaseHandler):
             logger.info("validating threepidcred sid %s on id server %s",
                         c['sid'], c['idServer'])
             try:
-                identity_handler = self.hs.get_handlers().identity_handler
-                threepid = yield identity_handler.threepid_from_creds(c)
+                threepid = yield self.identity_handler.threepid_from_creds(c)
             except Exception:
                 logger.exception("Couldn't validate 3pid")
                 raise RegistrationError(400, "Couldn't validate 3pid")
@@ -384,9 +404,8 @@ class RegistrationHandler(BaseHandler):

         # Now we have a matrix ID, bind it to the threepids we were given
         for c in threepidCreds:
-            identity_handler = self.hs.get_handlers().identity_handler
             # XXX: This should be a deferred list, shouldn't it?
-            yield identity_handler.bind_threepid(c, user_id)
+            yield self.identity_handler.bind_threepid(c, user_id)

     def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
         # don't allow people to register the server notices mxid
@@ -441,7 +460,7 @@ class RegistrationHandler(BaseHandler):
         lines = response.split('\n')
         json = {
             "valid": lines[0] == 'true',
-            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
+            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                          "error=%s" % lines[1]
         }
         defer.returnValue(json)
@@ -452,7 +471,7 @@ class RegistrationHandler(BaseHandler):
         Used only by c/s api v1
         """
         data = yield self.captcha_client.post_urlencoded_get_raw(
-            "http://www.google.com:80/recaptcha/api/verify",
+            "http://www.recaptcha.net:80/recaptcha/api/verify",
             args={
                 'privatekey': private_key,
                 'remoteip': ip_addr,
@@ -494,7 +513,7 @@ class RegistrationHandler(BaseHandler):
             token = self.macaroon_gen.generate_access_token(user_id)

         if need_register:
-            yield self.store.register(
+            yield self._register_with_store(
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
@@ -512,9 +531,6 @@ class RegistrationHandler(BaseHandler):

         defer.returnValue((user_id, token))

-    def auth_handler(self):
-        return self.hs.get_auth_handler()
-
     @defer.inlineCallbacks
     def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
         """Get a guest access token for a 3PID, creating a guest account if
@@ -573,3 +589,275 @@ class RegistrationHandler(BaseHandler):
             action="join",
             ratelimit=False,
         )

    def _register_with_store(self, user_id, token=None, password_hash=None,
                             was_guest=False, make_guest=False, appservice_id=None,
                             create_profile_with_displayname=None, admin=False,
                             user_type=None):
        """Register user in the datastore.

        Args:
            user_id (str): The desired user ID to register.
            token (str): The desired access token to use for this user. If this
                is not None, the given access token is associated with the user
                id.
            password_hash (str|None): Optional. The password hash for this user.
            was_guest (bool): Optional. Whether this is a guest account being
                upgraded to a non-guest account.
            make_guest (boolean): True if the new user should be a guest,
                false to add a regular user account.
            appservice_id (str|None): The ID of the appservice registering the user.
            create_profile_with_displayname (unicode|None): Optionally create a
                profile for the user, setting their displayname to the given value.
            admin (boolean): is an admin user?
            user_type (str|None): type of user. One of the values from
                api.constants.UserTypes, or None for a normal user.

        Returns:
            Deferred
        """
        if self.hs.config.worker_app:
            return self._register_client(
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                appservice_id=appservice_id,
                create_profile_with_displayname=create_profile_with_displayname,
                admin=admin,
                user_type=user_type,
            )
        else:
            return self.store.register(
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                appservice_id=appservice_id,
                create_profile_with_displayname=create_profile_with_displayname,
                admin=admin,
                user_type=user_type,
            )

    @defer.inlineCallbacks
    def register_device(self, user_id, device_id, initial_display_name,
                        is_guest=False):
        """Register a device for a user and generate an access token.

        Args:
            user_id (str): full canonical @user:id
            device_id (str|None): The device ID to check, or None to generate
                a new one.
            initial_display_name (str|None): An optional display name for the
                device.
            is_guest (bool): Whether this is a guest account

        Returns:
            defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
        """

        if self.hs.config.worker_app:
            r = yield self._register_device_client(
                user_id=user_id,
                device_id=device_id,
                initial_display_name=initial_display_name,
                is_guest=is_guest,
            )
            defer.returnValue((r["device_id"], r["access_token"]))
        else:
            device_id = yield self.device_handler.check_device_registered(
                user_id, device_id, initial_display_name
            )
            if is_guest:
                access_token = self.macaroon_gen.generate_access_token(
                    user_id, ["guest = true"]
                )
            else:
                access_token = yield self._auth_handler.get_access_token_for_user_id(
                    user_id, device_id=device_id,
                )

            defer.returnValue((device_id, access_token))

    @defer.inlineCallbacks
    def post_registration_actions(self, user_id, auth_result, access_token,
                                  bind_email, bind_msisdn):
        """A user has completed registration

        Args:
            user_id (str): The user ID that consented
            auth_result (dict): The authenticated credentials of the newly
                registered user.
            access_token (str|None): The access token of the newly logged in
                device, or None if `inhibit_login` enabled.
            bind_email (bool): Whether to bind the email with the identity
                server
            bind_msisdn (bool): Whether to bind the msisdn with the identity
                server
        """
        if self.hs.config.worker_app:
            yield self._post_registration_client(
                user_id=user_id,
                auth_result=auth_result,
                access_token=access_token,
                bind_email=bind_email,
                bind_msisdn=bind_msisdn,
            )
            return

        if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
            threepid = auth_result[LoginType.EMAIL_IDENTITY]
            # Necessary due to auth checks prior to the threepid being
            # written to the db
            if is_threepid_reserved(
                self.hs.config.mau_limits_reserved_threepids, threepid
            ):
                yield self.store.upsert_monthly_active_user(user_id)

            yield self._register_email_threepid(
                user_id, threepid, access_token,
                bind_email,
            )

        if auth_result and LoginType.MSISDN in auth_result:
            threepid = auth_result[LoginType.MSISDN]
            yield self._register_msisdn_threepid(
                user_id, threepid, bind_msisdn,
            )

        if auth_result and LoginType.TERMS in auth_result:
            yield self._on_user_consented(
                user_id, self.hs.config.user_consent_version,
            )

    @defer.inlineCallbacks
    def _on_user_consented(self, user_id, consent_version):
        """A user consented to the terms on registration

        Args:
            user_id (str): The user ID that consented
            consent_version (str): version of the policy the user has
                consented to.
        """
        logger.info("%s has consented to the privacy policy", user_id)
        yield self.store.user_set_consent_version(
            user_id, consent_version,
        )
        yield self.post_consent_actions(user_id)

    @defer.inlineCallbacks
    def _register_email_threepid(self, user_id, threepid, token, bind_email):
        """Add an email address as a 3pid identifier

        Also adds an email pusher for the email address, if configured in the
        HS config

        Also optionally binds emails to the given user_id on the identity server

        Must be called on master.

        Args:
            user_id (str): id of user
            threepid (object): m.login.email.identity auth response
            token (str|None): access_token for the user, or None if not logged
                in.
            bind_email (bool): true if the client requested the email to be
                bound at the identity server
        Returns:
            defer.Deferred:
        """
        reqd = ('medium', 'address', 'validated_at')
        if any(x not in threepid for x in reqd):
            # This will only happen if the ID server returns a malformed response
            logger.info("Can't add incomplete 3pid")
            return

        yield self._auth_handler.add_threepid(
            user_id,
            threepid['medium'],
            threepid['address'],
            threepid['validated_at'],
        )

        # And we add an email pusher for them by default, but only
        # if email notifications are enabled (so people don't start
        # getting mail spam where they weren't before if email
        # notifs are set up on a home server)
        if (self.hs.config.email_enable_notifs and
                self.hs.config.email_notif_for_new_users
                and token):
            # Pull the ID of the access token back out of the db
            # It would really make more sense for this to be passed
            # up when the access token is saved, but that's quite an
            # invasive change I'd rather do separately.
            user_tuple = yield self.store.get_user_by_access_token(
                token
            )
            token_id = user_tuple["token_id"]

            yield self.pusher_pool.add_pusher(
                user_id=user_id,
                access_token=token_id,
                kind="email",
                app_id="m.email",
                app_display_name="Email Notifications",
                device_display_name=threepid["address"],
                pushkey=threepid["address"],
                lang=None,  # We don't know a user's language here
                data={},
            )

        if bind_email:
            logger.info("bind_email specified: binding")
            logger.debug("Binding emails %s to %s" % (
                threepid, user_id
            ))
            yield self.identity_handler.bind_threepid(
                threepid['threepid_creds'], user_id
            )
        else:
            logger.info("bind_email not specified: not binding email")

    @defer.inlineCallbacks
    def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn):
        """Add a phone number as a 3pid identifier

        Also optionally binds msisdn to the given user_id on the identity server

        Must be called on master.

        Args:
            user_id (str): id of user
            threepid (object): m.login.msisdn auth response
            bind_msisdn (bool): true if the client requested the msisdn to be
                bound at the identity server
        Returns:
            defer.Deferred:
        """
        try:
            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
        except SynapseError as ex:
            if ex.errcode == Codes.MISSING_PARAM:
                # This will only happen if the ID server returns a malformed response
                logger.info("Can't add incomplete 3pid")
                defer.returnValue(None)
            raise

        yield self._auth_handler.add_threepid(
            user_id,
            threepid['medium'],
            threepid['address'],
            threepid['validated_at'],
        )

        if bind_msisdn:
            logger.info("bind_msisdn specified: binding")
            logger.debug("Binding msisdn %s to %s", threepid, user_id)
            yield self.identity_handler.bind_threepid(
                threepid['threepid_creds'], user_id
            )
        else:
            logger.info("bind_msisdn not specified: not binding msisdn")

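The `_register_with_store` dispatch above is the heart of this worker split: on a worker process the registration is forwarded to the master over a replication HTTP call, while on the master it writes straight to the datastore. A minimal sketch of that pattern, with hypothetical names (`RegistrationDispatcher`, `replication_client`, `store`) standing in for the real Synapse objects:

```python
# Minimal sketch of the worker/master dispatch used by _register_with_store.
# RegistrationDispatcher, replication_client and store are hypothetical
# stand-ins, not real Synapse APIs.
class RegistrationDispatcher(object):
    def __init__(self, is_worker, replication_client, store):
        self.is_worker = is_worker
        self.replication_client = replication_client  # HTTP call to the master
        self.store = store  # direct database access; master only

    def register(self, **kwargs):
        if self.is_worker:
            # Workers cannot write to the registration tables themselves,
            # so the request is forwarded to the master over replication.
            return self.replication_client(**kwargs)
        # On the master, write straight to the datastore.
        return self.store.register(**kwargs)
```

Keeping both branches behind one method means callers such as `register()` and `appservice_register()` never need to know which kind of process they are running on.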
@@ -311,6 +311,28 @@ class RoomCreationHandler(BaseHandler):
             creation_content=creation_content,
         )

        # Transfer membership events
        old_room_member_state_ids = yield self.store.get_filtered_current_state_ids(
            old_room_id, StateFilter.from_types([(EventTypes.Member, None)]),
        )

        # map from event_id to BaseEvent
        old_room_member_state_events = yield self.store.get_events(
            old_room_member_state_ids.values(),
        )
        for k, old_event in iteritems(old_room_member_state_events):
            # Only transfer ban events
            if ("membership" in old_event.content and
                    old_event.content["membership"] == "ban"):
                yield self.room_member_handler.update_membership(
                    requester,
                    UserID.from_string(old_event['state_key']),
                    new_room_id,
                    "ban",
                    ratelimit=False,
                    content=old_event.content,
                )

        # XXX invites/joins
        # XXX 3pid invites

@@ -50,16 +50,17 @@ class RoomListHandler(BaseHandler):

     def get_local_public_room_list(self, limit=None, since_token=None,
                                    search_filter=None,
-                                   network_tuple=EMPTY_THIRD_PARTY_ID,):
+                                   network_tuple=EMPTY_THIRD_PARTY_ID,
+                                   from_federation=False):
         """Generate a local public room list.

         There are multiple different lists: the main one plus one per third
         party network. A client can ask for a specific list or to return all.

         Args:
-            limit (int)
-            since_token (str)
-            search_filter (dict)
+            limit (int|None)
+            since_token (str|None)
+            search_filter (dict|None)
             network_tuple (ThirdPartyInstanceID): Which public list to use.
                 This can be (None, None) to indicate the main list, or a particular
                 appservice and network id to use an appservice specific one.
@@ -87,14 +88,30 @@ class RoomListHandler(BaseHandler):
         return self.response_cache.wrap(
             key,
             self._get_public_room_list,
-            limit, since_token, network_tuple=network_tuple,
+            limit, since_token,
+            network_tuple=network_tuple, from_federation=from_federation,
         )

     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
                               network_tuple=EMPTY_THIRD_PARTY_ID,
+                              from_federation=False,
                               timeout=None,):
         """Generate a public room list.
         Args:
             limit (int|None): Maximum amount of rooms to return.
             since_token (str|None)
             search_filter (dict|None): Dictionary to filter rooms by.
             network_tuple (ThirdPartyInstanceID): Which public list to use.
                 This can be (None, None) to indicate the main list, or a particular
                 appservice and network id to use an appservice specific one.
                 Setting to None returns all public rooms across all lists.
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
             timeout (int|None): Amount of seconds to wait for a response before
                 timing out.
         """
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -217,7 +234,8 @@ class RoomListHandler(BaseHandler):
         yield concurrently_execute(
             lambda r: self._append_room_entry_to_chunk(
                 r, rooms_to_num_joined[r],
-                chunk, limit, search_filter
+                chunk, limit, search_filter,
+                from_federation=from_federation,
             ),
             batch, 5,
         )
@@ -288,23 +306,51 @@ class RoomListHandler(BaseHandler):

     @defer.inlineCallbacks
     def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
-                                    search_filter):
+                                    search_filter, from_federation=False):
         """Generate the entry for a room in the public room list and append it
         to the `chunk` if it matches the search filter

         Args:
             room_id (str): The ID of the room.
             num_joined_users (int): The number of joined users in the room.
             chunk (list)
             limit (int|None): Maximum amount of rooms to display. Function will
                 return if length of chunk is greater than limit + 1.
             search_filter (dict|None)
             from_federation (bool): Whether this request originated from a
                 federating server or a client. Used for room filtering.
         """
         if limit and len(chunk) > limit + 1:
             # We've already got enough, so lets just drop it.
             return

         result = yield self.generate_room_entry(room_id, num_joined_users)
+        if not result:
+            return

-        if result and _matches_room_entry(result, search_filter):
+        if from_federation and not result.get("m.federate", True):
+            # This is a room that other servers cannot join. Do not show them
+            # this room.
+            return
+
+        if _matches_room_entry(result, search_filter):
             chunk.append(result)

     @cachedInlineCallbacks(num_args=1, cache_context=True)
     def generate_room_entry(self, room_id, num_joined_users, cache_context,
                             with_alias=True, allow_private=False):
         """Returns the entry for a room

         Args:
             room_id (str): The room's ID.
             num_joined_users (int): Number of users in the room.
             cache_context: Information for cached responses.
             with_alias (bool): Whether to return the room's aliases in the result.
             allow_private (bool): Whether invite-only rooms should be shown.

         Returns:
             Deferred[dict|None]: Returns a room entry as a dictionary, or None if this
                 room was determined not to be shown publicly.
         """
         result = {
             "room_id": room_id,
@@ -318,6 +364,7 @@ class RoomListHandler(BaseHandler):
         event_map = yield self.store.get_events([
             event_id for key, event_id in iteritems(current_state_ids)
             if key[0] in (
                 EventTypes.Create,
                 EventTypes.JoinRules,
                 EventTypes.Name,
                 EventTypes.Topic,
@@ -334,12 +381,17 @@ class RoomListHandler(BaseHandler):
         }

         # Double check that this is actually a public room.

         join_rules_event = current_state.get((EventTypes.JoinRules, ""))
         if join_rules_event:
             join_rule = join_rules_event.content.get("join_rule", None)
             if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                 defer.returnValue(None)

+        # Return whether this room is open to federation users or not
+        create_event = current_state.get((EventTypes.Create, ""))
+        result["m.federate"] = create_event.content.get("m.federate", True)
+
         if with_alias:
             aliases = yield self.store.get_aliases_for_room(
                 room_id, on_invalidate=cache_context.invalidate
@@ -61,7 +61,7 @@ class RoomMemberHandler(object):

         self.federation_handler = hs.get_handlers().federation_handler
         self.directory_handler = hs.get_handlers().directory_handler
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.profile_handler = hs.get_profile_handler()
         self.event_creation_handler = hs.get_event_creation_handler()

@@ -130,7 +130,7 @@ class UserDirectoryHandler(object):
         # Support users are for diagnostics and should not appear in the user directory.
         if not is_support:
             yield self.store.update_profile_in_user_dir(
-                user_id, profile.display_name, profile.avatar_url, None,
+                user_id, profile.display_name, profile.avatar_url, None
             )

     @defer.inlineCallbacks
@@ -166,8 +166,9 @@ class UserDirectoryHandler(object):
                 self.pos = deltas[-1]["stream_id"]

                 # Expose current event processing position to prometheus
-                synapse.metrics.event_processing_positions.labels(
-                    "user_dir").set(self.pos)
+                synapse.metrics.event_processing_positions.labels("user_dir").set(
+                    self.pos
+                )

                 yield self.store.update_user_directory_stream_pos(self.pos)

@@ -191,21 +192,25 @@ class UserDirectoryHandler(object):
             logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
             yield self._handle_initial_room(room_id)
             num_processed_rooms += 1
-            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+            yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

         logger.info("Processed all rooms.")

         if self.search_all_users:
             num_processed_users = 0
             user_ids = yield self.store.get_all_local_users()
-            logger.info("Doing initial update of user directory. %d users", len(user_ids))
+            logger.info(
+                "Doing initial update of user directory. %d users", len(user_ids)
+            )
             for user_id in user_ids:
                 # We add profiles for all users even if they don't match the
                 # include pattern, just in case we want to change it in future
-                logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
+                logger.info(
+                    "Handling user %d/%d", num_processed_users + 1, len(user_ids)
+                )
                 yield self._handle_local_user(user_id)
                 num_processed_users += 1
-                yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)

         logger.info("Processed all users")

@@ -224,24 +229,24 @@ class UserDirectoryHandler(object):
         if not is_in_room:
             return

-        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)
+        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )

         users_with_profile = yield self.state.get_current_user_in_room(room_id)
         user_ids = set(users_with_profile)
         unhandled_users = user_ids - self.initially_handled_users

         yield self.store.add_profiles_to_user_dir(
-            room_id, {
-                user_id: users_with_profile[user_id] for user_id in unhandled_users
-            }
+            room_id,
+            {user_id: users_with_profile[user_id] for user_id in unhandled_users},
         )

         self.initially_handled_users |= unhandled_users

         if is_public:
             yield self.store.add_users_to_public_room(
-                room_id,
-                user_ids=user_ids - self.initially_handled_users_in_public
+                room_id, user_ids=user_ids - self.initially_handled_users_in_public
             )
             self.initially_handled_users_in_public |= user_ids

@@ -253,7 +258,7 @@ class UserDirectoryHandler(object):
         count = 0
         for user_id in user_ids:
             if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

             if not self.is_mine_id(user_id):
                 count += 1
@@ -268,7 +273,7 @@ class UserDirectoryHandler(object):
                 continue

             if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
             count += 1

             user_set = (user_id, other_user_id)
@@ -290,25 +295,23 @@ class UserDirectoryHandler(object):

             if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
                 yield self.store.add_users_who_share_room(
-                    room_id, not is_public, to_insert,
+                    room_id, not is_public, to_insert
                 )
                 to_insert.clear()

             if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
                 yield self.store.update_users_who_share_room(
-                    room_id, not is_public, to_update,
+                    room_id, not is_public, to_update
                 )
                 to_update.clear()

         if to_insert:
-            yield self.store.add_users_who_share_room(
-                room_id, not is_public, to_insert,
-            )
+            yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
             to_insert.clear()

         if to_update:
             yield self.store.update_users_who_share_room(
-                room_id, not is_public, to_update,
+                room_id, not is_public, to_update
             )
             to_update.clear()
@@ -329,11 +332,12 @@ class UserDirectoryHandler(object):
             # may have become public or not and add/remove the users in said room
             if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
                 yield self._handle_room_publicity_change(
-                    room_id, prev_event_id, event_id, typ,
+                    room_id, prev_event_id, event_id, typ
                 )
             elif typ == EventTypes.Member:
                 change = yield self._get_key_change(
-                    prev_event_id, event_id,
+                    prev_event_id,
+                    event_id,
                     key_name="membership",
                     public_value=Membership.JOIN,
                 )
@@ -342,14 +346,16 @@ class UserDirectoryHandler(object):
                     # Need to check if the server left the room entirely, if so
                     # we might need to remove all the users in that room
                     is_in_room = yield self.store.is_host_joined(
-                        room_id, self.server_name,
+                        room_id, self.server_name
                     )
                     if not is_in_room:
                         logger.info("Server left room: %r", room_id)
                         # Fetch all the users that we marked as being in user
                         # directory due to being in the room and then check if
                         # need to remove those users or not
-                        user_ids = yield self.store.get_users_in_dir_due_to_room(room_id)
+                        user_ids = yield self.store.get_users_in_dir_due_to_room(
+                            room_id
+                        )
                         for user_id in user_ids:
                             yield self._handle_remove_user(room_id, user_id)
                         return
@@ -361,7 +367,7 @@ class UserDirectoryHandler(object):
                 if change is None:
                     # Handle any profile changes
                     yield self._handle_profile_change(
-                        state_key, room_id, prev_event_id, event_id,
+                        state_key, room_id, prev_event_id, event_id
                     )
                     continue

@@ -393,13 +399,15 @@ class UserDirectoryHandler(object):

         if typ == EventTypes.RoomHistoryVisibility:
             change = yield self._get_key_change(
-                prev_event_id, event_id,
+                prev_event_id,
+                event_id,
                 key_name="history_visibility",
                 public_value="world_readable",
             )
         elif typ == EventTypes.JoinRules:
             change = yield self._get_key_change(
-                prev_event_id, event_id,
+                prev_event_id,
+                event_id,
                 key_name="join_rule",
                 public_value=JoinRules.PUBLIC,
             )
@@ -524,7 +532,7 @@ class UserDirectoryHandler(object):
                 )
                 if self.is_mine_id(other_user_id) and not is_appservice:
                     shared_is_private = yield self.store.get_if_users_share_a_room(
-                        other_user_id, user_id,
+                        other_user_id, user_id
                     )
                     if shared_is_private is True:
                         # We've already marked in the database they share a private room
@@ -539,13 +547,11 @@ class UserDirectoryHandler(object):
                     to_insert.add((other_user_id, user_id))

         if to_insert:
-            yield self.store.add_users_who_share_room(
-                room_id, not is_public, to_insert,
-            )
+            yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)

         if to_update:
             yield self.store.update_users_who_share_room(
-                room_id, not is_public, to_update,
+                room_id, not is_public, to_update
             )

     @defer.inlineCallbacks
@@ -564,15 +570,15 @@ class UserDirectoryHandler(object):
         row = yield self.store.get_user_in_public_room(user_id)
         update_user_in_public = row and row["room_id"] == room_id

-        if (update_user_in_public or update_user_dir):
+        if update_user_in_public or update_user_dir:
             # XXX: Make this faster?
             rooms = yield self.store.get_rooms_for_user(user_id)
             for j_room_id in rooms:
-                if (not update_user_in_public and not update_user_dir):
+                if not update_user_in_public and not update_user_dir:
                     break

                 is_in_room = yield self.store.is_host_joined(
-                    j_room_id, self.server_name,
+                    j_room_id, self.server_name
                 )

                 if not is_in_room:
@@ -600,19 +606,19 @@ class UserDirectoryHandler(object):
         # Get a list of user tuples that were in the DB due to this room and
         # users (this includes tuples where the other user matches `user_id`)
         user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
-            user_id, room_id,
+            user_id, room_id
         )

         for user_id, other_user_id in user_tuples:
             # For each user tuple get a list of rooms that they still share,
             # trying to find a private room, and update the entry in the DB
-            rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)
+            rooms = yield self.store.get_rooms_in_common_for_users(
+                user_id, other_user_id
+            )

             # If they dont share a room anymore, remove the mapping
             if not rooms:
-                yield self.store.remove_user_who_share_room(
-                    user_id, other_user_id,
-                )
+                yield self.store.remove_user_who_share_room(user_id, other_user_id)
                 continue

             found_public_share = None
@@ -626,13 +632,13 @@ class UserDirectoryHandler(object):
                 else:
                     found_public_share = None
                     yield self.store.update_users_who_share_room(
-                        room_id, not is_public, [(user_id, other_user_id)],
+                        room_id, not is_public, [(user_id, other_user_id)]
                     )
                     break

         if found_public_share:
             yield self.store.update_users_who_share_room(
-                room_id, not is_public, [(user_id, other_user_id)],
+                room_id, not is_public, [(user_id, other_user_id)]
             )

     @defer.inlineCallbacks
@@ -660,7 +666,7 @@ class UserDirectoryHandler(object):

         if prev_name != new_name or prev_avatar != new_avatar:
             yield self.store.update_profile_in_user_dir(
-                user_id, new_name, new_avatar, room_id,
+                user_id, new_name, new_avatar, room_id
             )

     @defer.inlineCallbacks
@@ -15,8 +15,10 @@
 # limitations under the License.
 import re

+from twisted.internet import task
 from twisted.internet.defer import CancelledError
 from twisted.python import failure
+from twisted.web.client import FileBodyProducer

 from synapse.api.errors import SynapseError

@@ -47,3 +49,16 @@ def redact_uri(uri):
         r'\1<redacted>\3',
         uri
     )


class QuieterFileBodyProducer(FileBodyProducer):
    """Wrapper for FileBodyProducer that avoids CRITICAL errors when the connection drops.

    Workaround for https://github.com/matrix-org/synapse/issues/4003 /
    https://twistedmatrix.com/trac/ticket/6528
    """
    def stopProducing(self):
        try:
            FileBodyProducer.stopProducing(self)
        except task.TaskStopped:
            pass
@@ -15,6 +15,7 @@
 # limitations under the License.

 import logging
+from io import BytesIO

 from six import text_type
 from six.moves import urllib
@@ -39,7 +40,11 @@ from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers

 from synapse.api.errors import Codes, HttpResponseException, SynapseError
-from synapse.http import cancelled_to_request_timed_out_error, redact_uri
+from synapse.http import (
+    QuieterFileBodyProducer,
+    cancelled_to_request_timed_out_error,
+    redact_uri,
+)
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
@@ -246,7 +251,7 @@ class SimpleHttpClient(object):
         )

     @defer.inlineCallbacks
-    def request(self, method, uri, data=b'', headers=None):
+    def request(self, method, uri, data=None, headers=None):
         """
         Args:
             method (str): HTTP method to use.
@@ -265,11 +270,15 @@ class SimpleHttpClient(object):
         logger.info("Sending request %s %s", method, redact_uri(uri))

         try:
+            body_producer = None
+            if data is not None:
+                body_producer = QuieterFileBodyProducer(BytesIO(data))
+
             request_deferred = treq.request(
                 method,
                 uri,
                 agent=self.agent,
-                data=data,
+                data=body_producer,
                 headers=headers,
                 **self._extra_treq_args
             )
@@ -28,11 +28,10 @@ from canonicaljson import encode_canonical_json
 from prometheus_client import Counter
 from signedjson.sign import sign_json

-from twisted.internet import defer, protocol, task
+from twisted.internet import defer, protocol
 from twisted.internet.error import DNSLookupError
 from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
-from twisted.web.client import FileBodyProducer
 from twisted.web.http_headers import Headers

 import synapse.metrics
@@ -44,6 +43,7 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
+from synapse.http import QuieterFileBodyProducer
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.logcontext import make_deferred_yieldable
@@ -839,16 +839,3 @@ def encode_query_args(args):
     query_bytes = urllib.parse.urlencode(encoded_args, True)

     return query_bytes.encode('utf8')
-
-
-class QuieterFileBodyProducer(FileBodyProducer):
-    """Wrapper for FileBodyProducer that avoids CRITICAL errors when the connection drops.
-
-    Workaround for https://github.com/matrix-org/synapse/issues/4003 /
-    https://twistedmatrix.com/trac/ticket/6528
-    """
-    def stopProducing(self):
-        try:
-            FileBodyProducer.stopProducing(self)
-        except task.TaskStopped:
-            pass
@@ -106,10 +106,10 @@ def wrap_json_request_handler(h):
             # trace.
             f = failure.Failure()
             logger.error(
-                "Failed handle request via %r: %r: %s",
-                h,
+                "Failed handle request via %r: %r",
+                request.request_metrics.name,
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
         # Only respond with an error response if we haven't already started
         # writing, otherwise lets just kill the connection
@@ -169,18 +169,18 @@ def _return_html_error(f, request):
             )
         else:
             logger.error(
-                "Failed handle request %r: %s",
+                "Failed handle request %r",
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
     else:
         code = http_client.INTERNAL_SERVER_ERROR
         msg = "Internal server error"

         logger.error(
-            "Failed handle request %r: %s",
+            "Failed handle request %r",
             request,
-            f.getTraceback().rstrip(),
+            exc_info=(f.type, f.value, f.getTracebackObject()),
         )

     body = HTML_ERROR_TEMPLATE.format(
@@ -274,8 +274,6 @@ pending_calls_metric = Histogram(
 # Federation Metrics
 #

-sent_edus_counter = Counter("synapse_federation_client_sent_edus", "")
-
 sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

 events_processed_counter = Counter("synapse_federation_client_events_processed", "")
@@ -79,7 +79,7 @@ class ModuleApi(object):
         Returns:
             Deferred: a 2-tuple of (user_id, access_token)
         """
-        reg = self.hs.get_handlers().registration_handler
+        reg = self.hs.get_registration_handler()
         return reg.register(localpart=localpart)

     @defer.inlineCallbacks
@@ -32,9 +32,25 @@ if six.PY3:

 logger = logging.getLogger(__name__)

-http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
+http_push_processed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_processed",
+    "Number of push notifications successfully sent",
+)

-http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
+http_push_failed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_failed",
+    "Number of push notifications which failed",
+)
+
+http_badges_processed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_processed",
+    "Number of badge updates successfully sent",
+)
+
+http_badges_failed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_failed",
+    "Number of badge updates which failed",
+)


 class HttpPusher(object):
@@ -81,6 +97,11 @@ class HttpPusher(object):
             pusherdict['pushkey'],
         )

+        if self.data is None:
+            raise PusherConfigException(
+                "data can not be null for HTTP pusher"
+            )
+
         if 'url' not in self.data:
             raise PusherConfigException(
                 "'url' required in data for HTTP pusher"
@@ -333,10 +354,10 @@ class HttpPusher(object):
             defer.returnValue([])
         try:
             resp = yield self.http_client.post_json_get_json(self.url, notification_dict)
-        except Exception:
-            logger.warn(
-                "Failed to push event %s to %s",
-                event.event_id, self.name, exc_info=True,
+        except Exception as e:
+            logger.warning(
+                "Failed to push event %s to %s: %s %s",
+                event.event_id, self.name, type(e), e,
             )
             defer.returnValue(False)
         rejected = []
@@ -346,6 +367,10 @@ class HttpPusher(object):

     @defer.inlineCallbacks
     def _send_badge(self, badge):
+        """
+        Args:
+            badge (int): number of unread messages
+        """
         logger.info("Sending updated badge count %d to %s", badge, self.name)
         d = {
             'notification': {
@@ -366,14 +391,11 @@ class HttpPusher(object):
             }
         }
         try:
-            resp = yield self.http_client.post_json_get_json(self.url, d)
-        except Exception:
-            logger.warn(
-                "Failed to send badge count to %s",
-                self.name, exc_info=True,
+            yield self.http_client.post_json_get_json(self.url, d)
+            http_badges_processed_counter.inc()
+        except Exception as e:
+            logger.warning(
+                "Failed to send badge count to %s: %s %s",
+                self.name, type(e), e,
             )
-            defer.returnValue(False)
-        rejected = []
-        if 'rejected' in resp:
-            rejected = resp['rejected']
-        defer.returnValue(rejected)
+            http_badges_failed_counter.inc()
@@ -52,11 +52,12 @@ class PusherFactory(object):
             logger.info("defined email pusher type")

     def create_pusher(self, pusherdict):
-        logger.info("trying to create_pusher for %r", pusherdict)
-
-        if pusherdict['kind'] in self.pusher_types:
-            logger.info("found pusher")
-            return self.pusher_types[pusherdict['kind']](self.hs, pusherdict)
+        kind = pusherdict['kind']
+        f = self.pusher_types.get(kind, None)
+        if not f:
+            return None
+        logger.debug("creating %s pusher for %r", kind, pusherdict)
+        return f(self.hs, pusherdict)

     def _create_email_pusher(self, _hs, pusherdict):
         app_name = self._app_name_from_pusherdict(pusherdict)
@@ -19,6 +19,7 @@ import logging
 from twisted.internet import defer

 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.push import PusherConfigException
 from synapse.push.pusher import PusherFactory

 logger = logging.getLogger(__name__)
@@ -140,6 +141,10 @@ class PusherPool:

     @defer.inlineCallbacks
     def on_new_notifications(self, min_stream_id, max_stream_id):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             users_affected = yield self.store.get_push_action_users_in_range(
                 min_stream_id, max_stream_id
@@ -155,6 +160,10 @@ class PusherPool:

     @defer.inlineCallbacks
     def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+        if not self.pushers:
+            # nothing to do here.
+            return
+
         try:
             # Need to subtract 1 from the minimum because the lower bound here
             # is not inclusive
@@ -214,6 +223,15 @@ class PusherPool:
         """
         try:
             p = self.pusher_factory.create_pusher(pusherdict)
+        except PusherConfigException as e:
+            logger.warning(
+                "Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
+                pusherdict.get('user_name'),
+                pusherdict.get('app_id'),
+                pusherdict.get('pushkey'),
+                e,
+            )
+            return
         except Exception:
             logger.exception("Couldn't start a pusher: caught Exception")
             return
@@ -86,6 +86,7 @@ CONDITIONAL_REQUIREMENTS = {
     "saml2": ["pysaml2>=4.5.0"],
     "url_preview": ["lxml>=3.5.0"],
     "test": ["mock>=2.0", "parameterized"],
+    "sentry": ["sentry-sdk>=0.7.2"],
 }


@@ -143,9 +144,12 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
     for dependency in OPTS:
         try:
             _get_distribution(dependency)
-        except VersionConflict:
+        except VersionConflict as e:
             deps_needed.append(dependency)
-            errors.append("Needed %s but it was not installed" % (dependency,))
+            errors.append(
+                "Needed optional %s, got %s==%s"
+                % (dependency, e.dist.project_name, e.dist.version)
+            )
         except DistributionNotFound:
             # If it's not found, we don't care
             pass
@@ -14,7 +14,7 @@
 # limitations under the License.

 from synapse.http.server import JsonResource
-from synapse.replication.http import federation, membership, send_event
+from synapse.replication.http import federation, login, membership, register, send_event

 REPLICATION_PREFIX = "/_synapse/replication"

@@ -28,3 +28,5 @@ class ReplicationRestResource(JsonResource):
         send_event.register_servlets(hs, self)
         membership.register_servlets(hs, self)
         federation.register_servlets(hs, self)
+        login.register_servlets(hs, self)
+        register.register_servlets(hs, self)

synapse/replication/http/login.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint

logger = logging.getLogger(__name__)


class RegisterDeviceReplicationServlet(ReplicationEndpoint):
    """Ensure a device is registered, generating a new access token for the
    device.

    Used during registration and login.
    """

    NAME = "device_check_registered"
    PATH_ARGS = ("user_id",)

    def __init__(self, hs):
        super(RegisterDeviceReplicationServlet, self).__init__(hs)
        self.registration_handler = hs.get_registration_handler()

    @staticmethod
    def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
        """
        Args:
            device_id (str|None): Device ID to use, if None a new one is
                generated.
            initial_display_name (str|None)
            is_guest (bool)
        """
        return {
            "device_id": device_id,
            "initial_display_name": initial_display_name,
            "is_guest": is_guest,
        }

    @defer.inlineCallbacks
    def _handle_request(self, request, user_id):
        content = parse_json_object_from_request(request)

        device_id = content["device_id"]
        initial_display_name = content["initial_display_name"]
        is_guest = content["is_guest"]

        device_id, access_token = yield self.registration_handler.register_device(
            user_id, device_id, initial_display_name, is_guest,
        )

        defer.returnValue((200, {
            "device_id": device_id,
            "access_token": access_token,
        }))


def register_servlets(hs, http_server):
    RegisterDeviceReplicationServlet(hs).register(http_server)
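The `ReplicationEndpoint` base class turns `NAME` and `PATH_ARGS` into a URL and `_serialize_payload` into the request body, so a `make_client(hs)` callable on a worker ends up hitting `_handle_request` on the master. A hedged sketch of that round trip for the servlet above; the URL shape follows from `NAME`, `PATH_ARGS` and the `REPLICATION_PREFIX` seen earlier, and the example values are illustrative only:

```python
# Hedged sketch of the replication round trip; values are examples.
payload = {
    "device_id": None,                # let the master generate one
    "initial_display_name": "laptop",
    "is_guest": False,
}
# A worker effectively issues:
#   POST /_synapse/replication/device_check_registered/@alice:example.com
# with `payload` as the JSON body; _handle_request() on the master calls
# register_device() and replies:
#   200 {"device_id": "...", "access_token": "..."}
```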
@@ -191,7 +191,7 @@ class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint):
     def __init__(self, hs):
         super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs)

-        self.registeration_handler = hs.get_handlers().registration_handler
+        self.registeration_handler = hs.get_registration_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()

@@ -251,7 +251,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
     def __init__(self, hs):
         super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__(hs)

-        self.registeration_handler = hs.get_handlers().registration_handler
+        self.registeration_handler = hs.get_registration_handler()
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
         self.distributor = hs.get_distributor()

synapse/replication/http/register.py (new file, 146 lines)
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint

logger = logging.getLogger(__name__)


class ReplicationRegisterServlet(ReplicationEndpoint):
    """Register a new user
    """

    NAME = "register_user"
    PATH_ARGS = ("user_id",)

    def __init__(self, hs):
        super(ReplicationRegisterServlet, self).__init__(hs)
        self.store = hs.get_datastore()

    @staticmethod
    def _serialize_payload(
        user_id, token, password_hash, was_guest, make_guest, appservice_id,
        create_profile_with_displayname, admin, user_type,
    ):
        """
        Args:
            user_id (str): The desired user ID to register.
            token (str): The desired access token to use for this user. If this
                is not None, the given access token is associated with the user
                id.
            password_hash (str|None): Optional. The password hash for this user.
            was_guest (bool): Optional. Whether this is a guest account being
                upgraded to a non-guest account.
            make_guest (boolean): True if the new user should be a guest,
                false to add a regular user account.
            appservice_id (str|None): The ID of the appservice registering the user.
            create_profile_with_displayname (unicode|None): Optionally create a
                profile for the user, setting their displayname to the given value.
            admin (boolean): is an admin user?
            user_type (str|None): type of user. One of the values from
                api.constants.UserTypes, or None for a normal user.
        """
        return {
            "token": token,
            "password_hash": password_hash,
            "was_guest": was_guest,
            "make_guest": make_guest,
            "appservice_id": appservice_id,
            "create_profile_with_displayname": create_profile_with_displayname,
            "admin": admin,
            "user_type": user_type,
        }

    @defer.inlineCallbacks
    def _handle_request(self, request, user_id):
        content = parse_json_object_from_request(request)

        yield self.store.register(
            user_id=user_id,
            token=content["token"],
            password_hash=content["password_hash"],
            was_guest=content["was_guest"],
            make_guest=content["make_guest"],
            appservice_id=content["appservice_id"],
            create_profile_with_displayname=content["create_profile_with_displayname"],
            admin=content["admin"],
            user_type=content["user_type"],
        )

        defer.returnValue((200, {}))


class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
    """Run any post registration actions
    """

    NAME = "post_register"
    PATH_ARGS = ("user_id",)

    def __init__(self, hs):
        super(ReplicationPostRegisterActionsServlet, self).__init__(hs)
        self.store = hs.get_datastore()
        self.registration_handler = hs.get_registration_handler()

    @staticmethod
    def _serialize_payload(user_id, auth_result, access_token, bind_email,
                           bind_msisdn):
        """
        Args:
            user_id (str): The user ID that consented
            auth_result (dict): The authenticated credentials of the newly
                registered user.
            access_token (str|None): The access token of the newly logged in
                device, or None if `inhibit_login` enabled.
            bind_email (bool): Whether to bind the email with the identity
                server
            bind_msisdn (bool): Whether to bind the msisdn with the identity
                server
        """
        return {
            "auth_result": auth_result,
            "access_token": access_token,
            "bind_email": bind_email,
            "bind_msisdn": bind_msisdn,
        }

    @defer.inlineCallbacks
    def _handle_request(self, request, user_id):
        content = parse_json_object_from_request(request)

        auth_result = content["auth_result"]
        access_token = content["access_token"]
        bind_email = content["bind_email"]
        bind_msisdn = content["bind_msisdn"]

        yield self.registration_handler.post_registration_actions(
            user_id=user_id,
            auth_result=auth_result,
            access_token=access_token,
            bind_email=bind_email,
            bind_msisdn=bind_msisdn,
        )

        defer.returnValue((200, {}))


def register_servlets(hs, http_server):
    ReplicationRegisterServlet(hs).register(http_server)
    ReplicationPostRegisterActionsServlet(hs).register(http_server)
@@ -17,7 +17,7 @@ import logging

 import six

-from synapse.storage._base import SQLBaseStore
+from synapse.storage._base import _CURRENT_STATE_CACHE_NAME, SQLBaseStore
 from synapse.storage.engines import PostgresEngine

 from ._slaved_id_tracker import SlavedIdTracker
@@ -54,12 +54,12 @@ class BaseSlavedStore(SQLBaseStore):
         if stream_name == "caches":
             self._cache_id_gen.advance(token)
             for row in rows:
-                try:
-                    getattr(self, row.cache_func).invalidate(tuple(row.keys))
-                except AttributeError:
-                    # We probably haven't pulled in the cache in this worker,
-                    # which is fine.
-                    pass
+                if row.cache_func == _CURRENT_STATE_CACHE_NAME:
+                    room_id = row.keys[0]
+                    members_changed = set(row.keys[1:])
+                    self._invalidate_state_caches(room_id, members_changed)
+                else:
+                    self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))

     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         txn.call_after(cache_func.invalidate, keys)
@@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         if "\n" in string:
             raise Exception("Unexpected newline in command: %r", string)

-        self.sendLine(string.encode("utf-8"))
+        encoded_string = string.encode("utf-8")
+
+        if len(encoded_string) > self.MAX_LENGTH:
+            raise Exception(
+                "Failed to send command %s as too long (%d > %d)" % (
+                    cmd.NAME,
+                    len(encoded_string), self.MAX_LENGTH,
+                )
+            )
+
+        self.sendLine(encoded_string)

         self.last_sent_command = self.clock.time_msec()

@@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
     def id(self):
         return "%s-%s" % (self.name, self.conn_id)

+    def lineLengthExceeded(self, line):
+        """Called when we receive a line that is above the maximum line length
+        """
+        self.send_error("Line length exceeded")
+

class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
    VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
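The guard added to the send path exists because the protocol is a line-based `LineOnlyReceiver`: the peer enforces `MAX_LENGTH` on incoming lines, so an over-long command would be rejected on the far side with little diagnostic value. A minimal sketch of the same sender-side check, assuming `MAX_LENGTH` mirrors Twisted's default limit and `send_line` stands in for the transport write:

```python
# Minimal sketch of the sender-side length guard; MAX_LENGTH and
# send_line are stand-ins, not the real Synapse/Twisted objects.
MAX_LENGTH = 16 * 1024


def safe_send(send_line, command_name, string):
    encoded = string.encode("utf-8")
    if len(encoded) > MAX_LENGTH:
        # Failing loudly here beats the peer silently dropping the line
        # (or the connection) with no clue which command was at fault.
        raise Exception(
            "Failed to send command %s as too long (%d > %d)"
            % (command_name, len(encoded), MAX_LENGTH)
        )
    send_line(encoded)
```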
@@ -94,7 +94,7 @@ class LoginRestServlet(ClientV1RestServlet):
         self.jwt_algorithm = hs.config.jwt_algorithm
         self.cas_enabled = hs.config.cas_enabled
         self.auth_handler = self.hs.get_auth_handler()
-        self.device_handler = self.hs.get_device_handler()
+        self.registration_handler = hs.get_registration_handler()
         self.handlers = hs.get_handlers()
         self._well_known_builder = WellKnownBuilder(hs)
 
@@ -220,11 +220,10 @@ class LoginRestServlet(ClientV1RestServlet):
             login_submission,
         )
 
-        device_id = yield self._register_device(
-            canonical_user_id, login_submission,
-        )
-        access_token = yield auth_handler.get_access_token_for_user_id(
-            canonical_user_id, device_id,
+        device_id = login_submission.get("device_id")
+        initial_display_name = login_submission.get("initial_device_display_name")
+        device_id, access_token = yield self.registration_handler.register_device(
+            canonical_user_id, device_id, initial_display_name,
         )
 
         result = {
@@ -246,10 +245,13 @@ class LoginRestServlet(ClientV1RestServlet):
         user_id = (
             yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
         )
-        device_id = yield self._register_device(user_id, login_submission)
-        access_token = yield auth_handler.get_access_token_for_user_id(
-            user_id, device_id,
+
+        device_id = login_submission.get("device_id")
+        initial_display_name = login_submission.get("initial_device_display_name")
+        device_id, access_token = yield self.registration_handler.register_device(
+            user_id, device_id, initial_display_name,
         )
 
         result = {
             "user_id": user_id,  # may have changed
             "access_token": access_token,
@@ -286,11 +288,10 @@ class LoginRestServlet(ClientV1RestServlet):
         auth_handler = self.auth_handler
         registered_user_id = yield auth_handler.check_user_exists(user_id)
         if registered_user_id:
-            device_id = yield self._register_device(
-                registered_user_id, login_submission
-            )
-            access_token = yield auth_handler.get_access_token_for_user_id(
-                registered_user_id, device_id,
+            device_id = login_submission.get("device_id")
+            initial_display_name = login_submission.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                registered_user_id, device_id, initial_display_name,
             )
 
             result = {
@@ -299,12 +300,16 @@ class LoginRestServlet(ClientV1RestServlet):
                 "home_server": self.hs.hostname,
             }
         else:
-            # TODO: we should probably check that the register isn't going
-            # to fonx/change our user_id before registering the device
-            device_id = yield self._register_device(user_id, login_submission)
             user_id, access_token = (
                 yield self.handlers.registration_handler.register(localpart=user)
             )
+
+            device_id = login_submission.get("device_id")
+            initial_display_name = login_submission.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                registered_user_id, device_id, initial_display_name,
+            )
 
             result = {
                 "user_id": user_id,  # may have changed
                 "access_token": access_token,
@@ -313,26 +318,6 @@ class LoginRestServlet(ClientV1RestServlet):
 
         defer.returnValue(result)
 
-    def _register_device(self, user_id, login_submission):
-        """Register a device for a user.
-
-        This is called after the user's credentials have been validated, but
-        before the access token has been issued.
-
-        Args:
-            (str) user_id: full canonical @user:id
-            (object) login_submission: dictionary supplied to /login call, from
-                which we pull device_id and initial_device_name
-        Returns:
-            defer.Deferred: (str) device_id
-        """
-        device_id = login_submission.get("device_id")
-        initial_display_name = login_submission.get(
-            "initial_device_display_name")
-        return self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
-        )
-
 
 class CasRedirectServlet(RestServlet):
     PATTERNS = client_path_patterns("/login/(cas|sso)/redirect")
 
@@ -449,7 +434,7 @@ class SSOAuthHandler(object):
     def __init__(self, hs):
         self._hostname = hs.hostname
         self._auth_handler = hs.get_auth_handler()
-        self._registration_handler = hs.get_handlers().registration_handler
+        self._registration_handler = hs.get_registration_handler()
         self._macaroon_gen = hs.get_macaroon_generator()
 
     @defer.inlineCallbacks
 
@@ -33,7 +33,7 @@ RECAPTCHA_TEMPLATE = """
 <title>Authentication</title>
 <meta name='viewport' content='width=device-width, initial-scale=1,
     user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
-<script src="https://www.google.com/recaptcha/api.js"
+<script src="https://www.recaptcha.net/recaptcha/api.js"
     async defer></script>
 <script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
 <link rel="stylesheet" href="/_matrix/static/client/register/style.css">
@@ -129,7 +129,7 @@ class AuthRestServlet(RestServlet):
         self.hs = hs
         self.auth = hs.get_auth()
         self.auth_handler = hs.get_auth_handler()
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
 
     def on_GET(self, request, stagetype):
         session = parse_string(request, "session")
 
@@ -145,7 +145,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
         """
         super(UsernameAvailabilityRestServlet, self).__init__()
         self.hs = hs
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.ratelimiter = FederationRateLimiter(
             hs.get_clock(),
             # Time window of 2s
@@ -187,10 +187,9 @@ class RegisterRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.auth_handler = hs.get_auth_handler()
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
         self.identity_handler = hs.get_handlers().identity_handler
         self.room_member_handler = hs.get_room_member_handler()
-        self.device_handler = hs.get_device_handler()
         self.macaroon_gen = hs.get_macaroon_generator()
 
     @interactive_auth_handler
@@ -390,8 +389,7 @@ class RegisterRestServlet(RestServlet):
                 registered_user_id
             )
             # don't re-register the threepids
-            add_email = False
-            add_msisdn = False
+            registered = False
         else:
             # NB: This may be from the auth handler and NOT from the POST
             assert_params_in_dict(params, ["password"])
@@ -428,34 +426,21 @@ class RegisterRestServlet(RestServlet):
                 session_id, "registered_user_id", registered_user_id
             )
 
-            add_email = True
-            add_msisdn = True
+            registered = True
 
         return_dict = yield self._create_registration_details(
            registered_user_id, params
         )
 
-        if add_email and auth_result and LoginType.EMAIL_IDENTITY in auth_result:
-            threepid = auth_result[LoginType.EMAIL_IDENTITY]
-            yield self._register_email_threepid(
-                registered_user_id, threepid, return_dict["access_token"],
-                params.get("bind_email")
+        if registered:
+            yield self.registration_handler.post_registration_actions(
+                user_id=registered_user_id,
+                auth_result=auth_result,
+                access_token=return_dict.get("access_token"),
+                bind_email=params.get("bind_email"),
+                bind_msisdn=params.get("bind_msisdn"),
             )
 
-        if add_msisdn and auth_result and LoginType.MSISDN in auth_result:
-            threepid = auth_result[LoginType.MSISDN]
-            yield self._register_msisdn_threepid(
-                registered_user_id, threepid, return_dict["access_token"],
-                params.get("bind_msisdn")
-            )
-
-        if auth_result and LoginType.TERMS in auth_result:
-            logger.info("%s has consented to the privacy policy" % registered_user_id)
-            yield self.store.user_set_consent_version(
-                registered_user_id, self.hs.config.user_consent_version,
-            )
-            yield self.registration_handler.post_consent_actions(registered_user_id)
-
         defer.returnValue((200, return_dict))
 
     def on_OPTIONS(self, _):
@@ -506,115 +491,6 @@ class RegisterRestServlet(RestServlet):
         result = yield self._create_registration_details(user_id, body)
         defer.returnValue(result)
 
-    @defer.inlineCallbacks
-    def _register_email_threepid(self, user_id, threepid, token, bind_email):
-        """Add an email address as a 3pid identifier
-
-        Also adds an email pusher for the email address, if configured in the
-        HS config
-
-        Also optionally binds emails to the given user_id on the identity server
-
-        Args:
-            user_id (str): id of user
-            threepid (object): m.login.email.identity auth response
-            token (str): access_token for the user
-            bind_email (bool): true if the client requested the email to be
-                bound at the identity server
-        Returns:
-            defer.Deferred:
-        """
-        reqd = ('medium', 'address', 'validated_at')
-        if any(x not in threepid for x in reqd):
-            # This will only happen if the ID server returns a malformed response
-            logger.info("Can't add incomplete 3pid")
-            return
-
-        yield self.auth_handler.add_threepid(
-            user_id,
-            threepid['medium'],
-            threepid['address'],
-            threepid['validated_at'],
-        )
-
-        # And we add an email pusher for them by default, but only
-        # if email notifications are enabled (so people don't start
-        # getting mail spam where they weren't before if email
-        # notifs are set up on a home server)
-        if (self.hs.config.email_enable_notifs and
-                self.hs.config.email_notif_for_new_users):
-            # Pull the ID of the access token back out of the db
-            # It would really make more sense for this to be passed
-            # up when the access token is saved, but that's quite an
-            # invasive change I'd rather do separately.
-            user_tuple = yield self.store.get_user_by_access_token(
-                token
-            )
-            token_id = user_tuple["token_id"]
-
-            yield self.hs.get_pusherpool().add_pusher(
-                user_id=user_id,
-                access_token=token_id,
-                kind="email",
-                app_id="m.email",
-                app_display_name="Email Notifications",
-                device_display_name=threepid["address"],
-                pushkey=threepid["address"],
-                lang=None,  # We don't know a user's language here
-                data={},
-            )
-
-        if bind_email:
-            logger.info("bind_email specified: binding")
-            logger.debug("Binding emails %s to %s" % (
-                threepid, user_id
-            ))
-            yield self.identity_handler.bind_threepid(
-                threepid['threepid_creds'], user_id
-            )
-        else:
-            logger.info("bind_email not specified: not binding email")
-
-    @defer.inlineCallbacks
-    def _register_msisdn_threepid(self, user_id, threepid, token, bind_msisdn):
-        """Add a phone number as a 3pid identifier
-
-        Also optionally binds msisdn to the given user_id on the identity server
-
-        Args:
-            user_id (str): id of user
-            threepid (object): m.login.msisdn auth response
-            token (str): access_token for the user
-            bind_msisdn (bool): true if the client requested the msisdn to be
-                bound at the identity server
-        Returns:
-            defer.Deferred:
-        """
-        try:
-            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
-        except SynapseError as ex:
-            if ex.errcode == Codes.MISSING_PARAM:
-                # This will only happen if the ID server returns a malformed response
-                logger.info("Can't add incomplete 3pid")
-                defer.returnValue(None)
-            raise
-
-        yield self.auth_handler.add_threepid(
-            user_id,
-            threepid['medium'],
-            threepid['address'],
-            threepid['validated_at'],
-        )
-
-        if bind_msisdn:
-            logger.info("bind_msisdn specified: binding")
-            logger.debug("Binding msisdn %s to %s", threepid, user_id)
-            yield self.identity_handler.bind_threepid(
-                threepid['threepid_creds'], user_id
-            )
-        else:
-            logger.info("bind_msisdn not specified: not binding msisdn")
-
     @defer.inlineCallbacks
     def _create_registration_details(self, user_id, params):
         """Complete registration of newly-registered user
@@ -633,12 +509,10 @@ class RegisterRestServlet(RestServlet):
             "home_server": self.hs.hostname,
         }
         if not params.get("inhibit_login", False):
-            device_id = yield self._register_device(user_id, params)
-
-            access_token = (
-                yield self.auth_handler.get_access_token_for_user_id(
-                    user_id, device_id=device_id,
-                )
+            device_id = params.get("device_id")
+            initial_display_name = params.get("initial_device_display_name")
+            device_id, access_token = yield self.registration_handler.register_device(
+                user_id, device_id, initial_display_name, is_guest=False,
             )
 
             result.update({
@@ -647,26 +521,6 @@ class RegisterRestServlet(RestServlet):
         })
         defer.returnValue(result)
 
-    def _register_device(self, user_id, params):
-        """Register a device for a user.
-
-        This is called after the user's credentials have been validated, but
-        before the access token has been issued.
-
-        Args:
-            (str) user_id: full canonical @user:id
-            (object) params: registration parameters, from which we pull
-                device_id and initial_device_name
-        Returns:
-            defer.Deferred: (str) device_id
-        """
-        # register the user's device
-        device_id = params.get("device_id")
-        initial_display_name = params.get("initial_device_display_name")
-        return self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
-        )
-
     @defer.inlineCallbacks
     def _do_guest_registration(self, params):
         if not self.hs.config.allow_guest_access:
@@ -680,13 +534,10 @@ class RegisterRestServlet(RestServlet):
         # we have nowhere to store it.
         device_id = synapse.api.auth.GUEST_DEVICE_ID
         initial_display_name = params.get("initial_device_display_name")
-        yield self.device_handler.check_device_registered(
-            user_id, device_id, initial_display_name
+        device_id, access_token = yield self.registration_handler.register_device(
+            user_id, device_id, initial_display_name, is_guest=True,
         )
 
-        access_token = self.macaroon_gen.generate_access_token(
-            user_id, ["guest = true"]
-        )
         defer.returnValue((200, {
             "user_id": user_id,
             "device_id": device_id,
 
@@ -89,7 +89,7 @@ class ConsentResource(Resource):
 
         self.hs = hs
         self.store = hs.get_datastore()
-        self.registration_handler = hs.get_handlers().registration_handler
+        self.registration_handler = hs.get_registration_handler()
 
         # this is required by the request_handler wrapper
         self.clock = hs.get_clock()
 
@@ -133,8 +133,15 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
 
     logger.debug("Responding to media request with responder %s")
     add_file_headers(request, media_type, file_size, upload_name)
-    with responder:
-        yield responder.write_to_consumer(request)
+    try:
+        with responder:
+            yield responder.write_to_consumer(request)
+    except Exception as e:
+        # The majority of the time this will be due to the client having gone
+        # away. Unfortunately, Twisted simply throws a generic exception at us
+        # in that case.
+        logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
     finish_request(request)
 
 
@@ -18,6 +18,8 @@ import logging
 
 from twisted.web.resource import Resource
 
+from synapse.http.server import set_cors_headers
+
 logger = logging.getLogger(__name__)
 
 
@@ -59,6 +61,7 @@ class WellKnownResource(Resource):
         self._well_known_builder = WellKnownBuilder(hs)
 
     def render_GET(self, request):
+        set_cors_headers(request)
         r = self._well_known_builder.get_well_known()
         if not r:
             request.setResponseCode(404)
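For reference, when the homeserver is configured to advertise client endpoints, the builder returns a JSON document shaped per the Matrix client well-known format (value illustrative); otherwise the resource 404s as shown above:

    well_known = {
        "m.homeserver": {"base_url": "https://matrix.example.com"},
    }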
@@ -64,6 +64,7 @@ from synapse.handlers.presence import PresenceHandler
 from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
 from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.receipts import ReceiptsHandler
+from synapse.handlers.register import RegistrationHandler
 from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
 from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import RoomMemberMasterHandler
@@ -181,6 +182,7 @@ class HomeServer(object):
         'pagination_handler',
         'room_context_handler',
         'sendmail',
+        'registration_handler',
     ]
 
     # This is overridden in derived application classes
@@ -481,6 +483,9 @@ class HomeServer(object):
     def build_room_context_handler(self):
         return RoomContextHandler(self)
 
+    def build_registration_handler(self):
+        return RegistrationHandler(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
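These three hunks follow HomeServer's dependency-injection convention: listing 'registration_handler' in DEPENDENCIES generates a cached get_registration_handler() accessor which calls build_registration_handler() once and memoizes the result. A simplified sketch of that pattern (not Synapse's actual code):

    def _make_getter(name):
        # Generated get_<name>(): build on first use, cache on the instance.
        def _get(hs):
            attr = "_cached_" + name
            if not hasattr(hs, attr):
                setattr(hs, attr, getattr(hs, "build_" + name)())
            return getattr(hs, attr)
        return _get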
@@ -4,7 +4,7 @@
 <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
 <link rel="stylesheet" href="style.css">
 <script src="js/jquery-2.1.3.min.js"></script>
-<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
+<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
 <script src="register_config.js"></script>
 <script src="js/register.js"></script>
 </head>
 
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import itertools
 import logging
 import sys
 import threading
@@ -28,6 +29,8 @@ from twisted.internet import defer
 from synapse.api.errors import StoreError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.types import get_domain_from_id
+from synapse.util import batch_iter
 from synapse.util.caches.descriptors import Cache
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 from synapse.util.stringutils import exception_to_unicode
@@ -64,6 +67,10 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
     "event_search": "event_search_event_id_idx",
 }
 
+# This is a special cache name we use to batch multiple invalidations of caches
+# based on the current state when notifying workers over replication.
+_CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
+
 
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
@@ -100,6 +107,14 @@ class LoggingTransaction(object):
     def __iter__(self):
         return self.txn.__iter__()
 
+    def execute_batch(self, sql, args):
+        if isinstance(self.database_engine, PostgresEngine):
+            from psycopg2.extras import execute_batch
+            self._do_execute(lambda *x: execute_batch(self.txn, *x), sql, args)
+        else:
+            for val in args:
+                self.execute(sql, val)
+
     def execute(self, sql, *args):
         self._do_execute(self.txn.execute, sql, *args)
 
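The new execute_batch defers to psycopg2's helper on Postgres, which groups many parameter sets into far fewer network round-trips than issuing each statement separately; on sqlite it simply loops. A standalone sketch of the same psycopg2 call (DSN and values hypothetical; note psycopg2 itself uses %s placeholders, while Synapse's wrapper translates "?"):

    import psycopg2
    from psycopg2.extras import execute_batch

    conn = psycopg2.connect("dbname=synapse")  # hypothetical DSN
    with conn, conn.cursor() as cur:
        execute_batch(
            cur,
            "UPDATE cache_invalidation_stream SET invalidation_ts = %s WHERE stream_id = %s",
            [(1550000000000, 1), (1550000000001, 2)],
        )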
@@ -693,20 +708,34 @@ class SQLBaseStore(object):
             else:
                 return "%s = ?" % (key,)
 
-        # First try to update.
-        sql = "UPDATE %s SET %s WHERE %s" % (
-            table,
-            ", ".join("%s = ?" % (k,) for k in values),
-            " AND ".join(_getwhere(k) for k in keyvalues)
-        )
-        sqlargs = list(values.values()) + list(keyvalues.values())
+        if not values:
+            # If `values` is empty, then all of the values we care about are in
+            # the unique key, so there is nothing to UPDATE. We can just do a
+            # SELECT instead to see if it exists.
+            sql = "SELECT 1 FROM %s WHERE %s" % (
+                table,
+                " AND ".join(_getwhere(k) for k in keyvalues)
+            )
+            sqlargs = list(keyvalues.values())
+            txn.execute(sql, sqlargs)
+            if txn.fetchall():
+                # We have an existing record.
+                return False
+        else:
+            # First try to update.
+            sql = "UPDATE %s SET %s WHERE %s" % (
+                table,
+                ", ".join("%s = ?" % (k,) for k in values),
+                " AND ".join(_getwhere(k) for k in keyvalues)
+            )
+            sqlargs = list(values.values()) + list(keyvalues.values())
 
-        txn.execute(sql, sqlargs)
-        if txn.rowcount > 0:
-            # successfully updated at least one row.
-            return False
+            txn.execute(sql, sqlargs)
+            if txn.rowcount > 0:
+                # successfully updated at least one row.
+                return False
 
-        # We didn't update any rows so insert a new one
+        # We didn't find any existing rows, so insert a new one
         allvalues = {}
         allvalues.update(keyvalues)
         allvalues.update(values)
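The new empty-values branch matters because, with nothing to UPDATE, the old code would build "UPDATE table SET  WHERE ...", which is invalid SQL; when every column of interest is part of the unique key, the emulation degenerates to an existence probe. Illustrative generated SQL for a hypothetical table (non-NULL keys, so _getwhere emits plain equality):

    keyvalues = {"room_id": "!r:example.com", "user_id": "@u:example.com"}
    sql = "SELECT 1 FROM %s WHERE %s" % (
        "room_memberships",
        " AND ".join("%s = ?" % (k,) for k in keyvalues),
    )
    # -> SELECT 1 FROM room_memberships WHERE room_id = ? AND user_id = ?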
@@ -753,6 +782,106 @@ class SQLBaseStore(object):
         )
         txn.execute(sql, list(allvalues.values()))
 
+    def _simple_upsert_many_txn(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        if (
+            self.database_engine.can_native_upsert
+            and table not in self._unsafe_to_upsert_tables
+        ):
+            return self._simple_upsert_many_txn_native_upsert(
+                txn, table, key_names, key_values, value_names, value_values
+            )
+        else:
+            return self._simple_upsert_many_txn_emulated(
+                txn, table, key_names, key_values, value_names, value_values
+            )
+
+    def _simple_upsert_many_txn_emulated(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times, but without native UPSERT support or batching.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        # No value columns, therefore make a blank list so that the following
+        # zip() works correctly.
+        if not value_names:
+            value_values = [() for x in range(len(key_values))]
+
+        for keyv, valv in zip(key_values, value_values):
+            _keys = {x: y for x, y in zip(key_names, keyv)}
+            _vals = {x: y for x, y in zip(value_names, valv)}
+
+            self._simple_upsert_txn_emulated(txn, table, _keys, _vals)
+
+    def _simple_upsert_many_txn_native_upsert(
+        self, txn, table, key_names, key_values, value_names, value_values
+    ):
+        """
+        Upsert, many times, using batching where possible.
+
+        Args:
+            table (str): The table to upsert into
+            key_names (list[str]): The key column names.
+            key_values (list[list]): A list of each row's key column values.
+            value_names (list[str]): The value column names. If empty, no
+                values will be used, even if value_values is provided.
+            value_values (list[list]): A list of each row's value column values.
+        Returns:
+            None
+        """
+        allnames = []
+        allnames.extend(key_names)
+        allnames.extend(value_names)
+
+        if not value_names:
+            # No value columns, therefore make a blank list so that the
+            # following zip() works correctly.
+            latter = "NOTHING"
+            value_values = [() for x in range(len(key_values))]
+        else:
+            latter = (
+                "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in value_names)
+            )
+
+        sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
+            table,
+            ", ".join(k for k in allnames),
+            ", ".join("?" for _ in allnames),
+            ", ".join(key_names),
+            latter,
+        )
+
+        args = []
+
+        for x, y in zip(key_values, value_values):
+            args.append(tuple(x) + tuple(y))
+
+        return txn.execute_batch(sql, args)
+
     def _simple_select_one(self, table, keyvalues, retcols,
                            allow_none=False, desc="_simple_select_one"):
         """Executes a SELECT query on the named table, which is expected to
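To make the native branch concrete, this is the statement it assembles for a hypothetical two-column key with one value column:

    key_names = ["user_id", "device_id"]   # hypothetical schema
    value_names = ["display_name"]
    allnames = key_names + value_names
    sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
        "devices",
        ", ".join(allnames),
        ", ".join("?" for _ in allnames),
        ", ".join(key_names),
        "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in value_names),
    )
    # -> INSERT INTO devices (user_id, device_id, display_name) VALUES (?, ?, ?)
    #    ON CONFLICT (user_id, device_id) DO UPDATE SET display_name=EXCLUDED.display_name

With no value columns, the DO clause collapses to NOTHING, making the statement a pure insert-if-absent.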
@@ -1184,6 +1313,90 @@ class SQLBaseStore(object):
             be invalidated.
         """
         txn.call_after(cache_func.invalidate, keys)
+        self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
+
+    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
+        """Special case invalidation of caches based on current state.
+
+        We special case this so that we can batch the cache invalidations into a
+        single replication poke.
+
+        Args:
+            txn
+            room_id (str): Room where state changed
+            members_changed (iterable[str]): The user_ids of members that have changed
+        """
+        txn.call_after(self._invalidate_state_caches, room_id, members_changed)
+
+        # We need to be careful that the size of the `members_changed` list
+        # isn't so large that it causes problems sending over replication, so we
+        # send them in chunks.
+        # Max line length is 16K, and max user ID length is 255, so 50 should
+        # be safe.
+        for chunk in batch_iter(members_changed, 50):
+            keys = itertools.chain([room_id], chunk)
+            self._send_invalidation_to_replication(
+                txn, _CURRENT_STATE_CACHE_NAME, keys,
+            )
+
+    def _invalidate_state_caches(self, room_id, members_changed):
+        """Invalidates caches that are based on the current state, but does
+        not stream invalidations down replication.
+
+        Args:
+            room_id (str): Room where state changed
+            members_changed (iterable[str]): The user_ids of members that have
+                changed
+        """
+        for member in members_changed:
+            self._attempt_to_invalidate_cache(
+                "get_rooms_for_user_with_stream_ordering", (member,),
+            )
+
+        for host in set(get_domain_from_id(u) for u in members_changed):
+            self._attempt_to_invalidate_cache(
+                "is_host_joined", (room_id, host,),
+            )
+            self._attempt_to_invalidate_cache(
+                "was_host_joined", (room_id, host,),
+            )
+
+        self._attempt_to_invalidate_cache(
+            "get_users_in_room", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_room_summary", (room_id,),
+        )
+        self._attempt_to_invalidate_cache(
+            "get_current_state_ids", (room_id,),
+        )
+
+    def _attempt_to_invalidate_cache(self, cache_name, key):
+        """Attempts to invalidate the cache of the given name, ignoring if the
+        cache doesn't exist. Mainly used for invalidating caches on workers,
+        where they may not have the cache.
+
+        Args:
+            cache_name (str)
+            key (tuple)
+        """
+        try:
+            getattr(self, cache_name).invalidate(key)
+        except AttributeError:
+            # We probably haven't pulled in the cache in this worker,
+            # which is fine.
+            pass
+
+    def _send_invalidation_to_replication(self, txn, cache_name, keys):
+        """Notifies replication that given cache has been invalidated.
+
+        Note that this does *not* invalidate the cache locally.
+
+        Args:
+            txn
+            cache_name (str)
+            keys (iterable[str])
+        """
+
         if isinstance(self.database_engine, PostgresEngine):
             # get_next() returns a context manager which is designed to wrap
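The chunk size in _invalidate_state_caches_and_stream comes straight from the replication framing: at most 16K bytes per line and 255 bytes per user ID leaves ample headroom at 50 IDs per poke. batch_iter behaves like this stand-in (a sketch, not Synapse's implementation):

    from itertools import islice

    def batch_iter(iterable, size):
        # Yield successive tuples of at most `size` items until exhausted.
        it = iter(iterable)
        chunk = tuple(islice(it, size))
        while chunk:
            yield chunk
            chunk = tuple(islice(it, size))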
@@ -1201,7 +1414,7 @@ class SQLBaseStore(object):
                 table="cache_invalidation_stream",
                 values={
                     "stream_id": stream_id,
-                    "cache_func": cache_func.__name__,
+                    "cache_func": cache_name,
                     "keys": list(keys),
                     "invalidation_ts": self.clock.time_msec(),
                 }
@@ -1390,6 +1603,14 @@ class SQLBaseStore(object):
 
         return cls.cursor_to_dict(txn)
 
+    @property
+    def database_engine_name(self):
+        return self.database_engine.module.__name__
+
+    def get_server_version(self):
+        """Returns a string describing the server version number"""
+        return self.database_engine.server_version
+
 
 class _RollbackButIsFineException(Exception):
     """ This exception is used to rollback a transaction without implying
 
@@ -23,6 +23,7 @@ class PostgresEngine(object):
         self.module = database_module
         self.module.extensions.register_type(self.module.extensions.UNICODE)
         self.synchronous_commit = database_config.get("synchronous_commit", True)
+        self._version = None  # unknown as yet
 
     def check_database(self, txn):
         txn.execute("SHOW SERVER_ENCODING")
@@ -87,3 +88,27 @@ class PostgresEngine(object):
         """
         txn.execute("SELECT nextval('state_group_id_seq')")
         return txn.fetchone()[0]
+
+    @property
+    def server_version(self):
+        """Returns a string giving the server version. For example: '8.1.5'
+
+        Returns:
+            string
+        """
+        # note that this is a bit of a hack because it relies on on_new_connection
+        # having been called at least once. Still, that should be a safe bet here.
+        numver = self._version
+        assert numver is not None
+
+        # https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSERVERVERSION
+        if numver >= 100000:
+            return "%i.%i" % (
+                numver / 10000, numver % 10000,
+            )
+        else:
+            return "%i.%i.%i" % (
+                numver / 10000,
+                (numver % 10000) / 100,
+                numver % 100,
+            )
 
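A quick worked check of the decoding above, mirroring the integer scheme libpq documents (PostgreSQL 10+ packs major * 10000 + minor; older releases add minor * 100 + patch):

    def _decode(numver):
        # For illustration only; mirrors the property above.
        if numver >= 100000:
            return "%i.%i" % (numver / 10000, numver % 10000)
        return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100)

    assert _decode(100002) == "10.2"
    assert _decode(90605) == "9.6.5"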
@@ -70,6 +70,15 @@ class Sqlite3Engine(object):
         self._current_state_group_id += 1
         return self._current_state_group_id
 
+    @property
+    def server_version(self):
+        """Gets a string giving the server version. For example: '3.22.0'
+
+        Returns:
+            string
+        """
+        return "%i.%i.%i" % self.module.sqlite_version_info
+
 
 # Following functions taken from: https://github.com/coleifer/peewee
 
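The sqlite counterpart needs no cached handshake value, since the stdlib exposes the linked library's version directly; an equivalent standalone check:

    import sqlite3

    # sqlite_version_info reports the linked SQLite library, not the module.
    print("%i.%i.%i" % sqlite3.sqlite_version_info)  # e.g. 3.22.0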
Some files were not shown because too many files have changed in this diff.