Compare commits
215 Commits
v0.99.1 ... matthew/sa
| Author | SHA1 | Date |
|---|---|---|
| | d0dc0014c1 | |
| | 054ed1ab5b | |
| | b556fcda72 | |
| | 11d64ec4a9 | |
| | d70c2748af | |
| | 48583cef7e | |
| | cd7110c869 | |
| | 2db49ea476 | |
| | b29693a30b | |
| | 8e28bc5eee | |
| | aba5eeabd5 | |
| | 856c83f5f8 | |
| | 2c3548d9d8 | |
| | 3064952939 | |
| | 1beebe916f | |
| | ac6a0d72b2 | |
| | 9ac72d9543 | |
| | ac61b45a75 | |
| | b131cc77df | |
| | 68f47d6744 | |
| | f2a753ea38 | |
| | 76550c58d2 | |
| | 8267034a63 | |
| | 3134964054 | |
| | 46b0151524 | |
| | 95840d84d4 | |
| | 54f9ce11a7 | |
| | d4dc527a1a | |
| | 1b2940b3bd | |
| | 1e315017d3 | |
| | b5c13df0c4 | |
| | 4cff9376f7 | |
| | 7590e9fa28 | |
| | 44a4d65586 | |
| | 6bb1c028f1 | |
| | 6870fc496f | |
| | f191be822b | |
| | 57426ec6a3 | |
| | 4bc7483518 | |
| | 09fc34c935 | |
| | 25814921f1 | |
| | 313987187e | |
| | 4dc945ba30 | |
| | 802884d4ee | |
| | b8e6ed36c1 | |
| | 6fcb25202f | |
| | 7a4632af9c | |
| | c0b6955e3b | |
| | c74624a633 | |
| | a1a6473293 | |
| | c4414768af | |
| | a712aa3a9c | |
| | 16565e67db | |
| | 40c2271680 | |
| | 6728bf3940 | |
| | 6946c20111 | |
| | 71669a0fba | |
| | 899a119c2b | |
| | 641c409e4e | |
| | 70ea2f4e1d | |
| | 96c408273e | |
| | 1330aa4a8f | |
| | 65f3fbfbf7 | |
| | 1d0f2ec812 | |
| | c7b333c545 | |
| | 69efe6fb16 | |
| | 108d5fb20d | |
| | 9c598dddcb | |
| | b1a90da82e | |
| | 16c7afa94c | |
| | 8aaf7ffc44 | |
| | 84c0a20dfe | |
| | 4b9e5076c4 | |
| | 07493607a8 | |
| | bd398b874e | |
| | e4b078a600 | |
| | d730c2c22b | |
| | 890cb048fd | |
| | 5b9786ee00 | |
| | 65d1003d01 | |
| | f5050e148c | |
| | 9342cc6ab1 | |
| | 21d3f82344 | |
| | 47a7e3928d | |
| | 65bf9f1119 | |
| | 41285ffe5b | |
| | 71304bfc8d | |
| | 59e0112209 | |
| | d14e94bae4 | |
| | b82c9cf462 | |
| | f2891d2487 | |
| | 9982c71515 | |
| | 0969d688e3 | |
| | 5d3e3c051d | |
| | a164134a53 | |
| | 1d9df51ff1 | |
| | e28ef831e6 | |
| | 80467bbac3 | |
| | 7b288826b7 | |
| | e07384c4e1 | |
| | e1666af9be | |
| | fcd6f01dc7 | |
| | 0abb094f1a | |
| | 6d65659b62 | |
| | 16e0680498 | |
| | b9d6756b14 | |
| | 9bccd5e472 | |
| | 8184ae8a09 | |
| | 82fca11fc1 | |
| | 82ca6d1f9f | |
| | 633e5c933b | |
| | 3d672fec51 | |
| | a06614bd2a | |
| | b2200a8690 | |
| | c88bc53903 | |
| | 8d98dc8ffe | |
| | 86920ac266 | |
| | dbdc565dfd | |
| | c594cc8076 | |
| | ae753fed8c | |
| | 5f9bdf90fe | |
| | c003450057 | |
| | 49b58f0a16 | |
| | 62175a20e5 | |
| | 1bb35e3a83 | |
| | bc8fa1509d | |
| | 1c0eb8bbb2 | |
| | a288bdf0b1 | |
| | 5a707a2f9a | |
| | a8626901cd | |
| | 32590b7139 | |
| | 7c70b8f8a6 | |
| | 107aeb6915 | |
| | 968a30a75c | |
| | 0869f01e74 | |
| | 2b2466f78b | |
| | 561eebe170 | |
| | 34ac75ce2c | |
| | 92e6fb5c89 | |
| | a9b5ea6fc1 | |
| | f8b9ca53ce | |
| | d154f5a055 | |
| | f3ab0b2390 | |
| | 128902d60a | |
| | 4cc4400b4d | |
| | fc2c245a1f | |
| | 459d3d5046 | |
| | d328a93b51 | |
| | af691e415c | |
| | 028267acd2 | |
| | d08bac4136 | |
| | c30f73c86a | |
| | 092b541401 | |
| | 45bb55c6de | |
| | 8b9ae6d3a6 | |
| | 94960cef03 | |
| | 12ae64ce0d | |
| | fe725f7e45 | |
| | e85aabb030 | |
| | d9713e916e | |
| | 04dad5ac16 | |
| | 2f16857ca9 | |
| | e07cc31cb8 | |
| | 68a53f825f | |
| | 32e54b472a | |
| | 915421065b | |
| | d1b060b492 | |
| | 7033b05cad | |
| | 9caab0c364 | |
| | dc5efc92a8 | |
| | e83a190643 | |
| | 41c3f21c3b | |
| | 91c8a7f9f4 | |
| | eb2b8523ae | |
| | 5b68e12fd8 | |
| | 6d02a13d81 | |
| | 4151111d95 | |
| | 6575df647d | |
| | 68d2869c8d | |
| | da95867d30 | |
| | bd4505f765 | |
| | f86b695cbd | |
| | af8a2f679b | |
| | 1895d14e12 | |
| | b99c532c1c | |
| | 02c729d6b0 | |
| | 02c46acc6a | |
| | bfcefbb230 | |
| | 6f47bc3fb2 | |
| | 8e32f26cb8 | |
| | cb12a37708 | |
| | f61b2068e6 | |
| | f666fe36d7 | |
| | bf4fd14806 | |
| | f830a3be2a | |
| | 649fe1c2be | |
| | f595d6ac57 | |
| | f311018823 | |
| | 4074c8b968 | |
| | eaf4d11af9 | |
| | b02465b9db | |
| | 00cf679bf2 | |
| | 0927adb012 | |
| | 7fc1196a36 | |
| | 6cb415b63f | |
| | c6e75c9f2d | |
| | c1dfd6a18a | |
| | bb4fd8f927 | |
| | dc70789056 | |
| | 93f7d2df3e | |
| | 6a8f902edb | |
| | ef2228c890 | |
| | 4588b0d64a | |
| | d528406cb8 | |
| | ae19a7db8c | |
.buildkite/.env (new file, 13 lines)

@@ -0,0 +1,13 @@
CI
BUILDKITE
BUILDKITE_BUILD_NUMBER
BUILDKITE_BRANCH
BUILDKITE_BUILD_NUMBER
BUILDKITE_JOB_ID
BUILDKITE_BUILD_URL
BUILDKITE_PROJECT_SLUG
BUILDKITE_COMMIT
BUILDKITE_PULL_REQUEST
BUILDKITE_TAG
CODECOV_TOKEN
TRIAL_FLAGS
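Entries in this env file carry no `=value`, so each variable is forwarded from the invoking environment into the CI containers. A quick local check of that behaviour (a sketch; any name from the list works):

```bash
# bare names in an env file inherit their value from the current environment
export TRIAL_FLAGS="-j 2"
docker run --rm --env-file .buildkite/.env python:3.6 env | grep TRIAL_FLAGS
```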
.buildkite/docker-compose.py27.pg94.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.4
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:2.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app

.buildkite/docker-compose.py27.pg95.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:2.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app

.buildkite/docker-compose.py35.pg94.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.4
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.5
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app

.buildkite/docker-compose.py35.pg95.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.5
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app

.buildkite/docker-compose.py37.pg11.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:11
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app

.buildkite/docker-compose.py37.pg95.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
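These compose files are consumed by the `docker-compose` Buildkite plugin in the pipeline below, which runs the `testenv` service. The same environment can be reproduced locally from a checkout (a sketch):

```bash
# spin up the Python 3.7 / PostgreSQL 11 test environment and run the
# postgres-backed test suite, as the corresponding pipeline step does
docker-compose -f .buildkite/docker-compose.py37.pg11.yaml run testenv \
    bash -c 'python -m pip install tox && python -m tox -e py37-postgres'
```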
.buildkite/pipeline.yml (new file, 157 lines)

@@ -0,0 +1,157 @@
env:
  CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"

steps:
  - command:
      - "python -m pip install tox"
      - "tox -e pep8"
    label: "\U0001F9F9 PEP-8"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e packaging"
    label: "\U0001F9F9 packaging"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e check_isort"
    label: "\U0001F9F9 isort"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "scripts-dev/check-newsfragment"
    label: ":newspaper: Newsfile"
    branches: "!master !develop !release-*"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true

  - wait

  - command:
      - "python -m pip install tox"
      - "tox -e check-sampleconfig"
    label: "\U0001F9F9 check-sample-config"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e py27,codecov"
    label: ":python: 2.7 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:2.7"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py35,codecov"
    label: ":python: 3.5 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.5"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py36,codecov"
    label: ":python: 3.6 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py37,codecov"
    label: ":python: 3.7 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.7"
          propagate-environment: true

  - label: ":python: 2.7 / :postgres: 9.4"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py27.pg94.yaml

  - label: ":python: 2.7 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py27.pg95.yaml

  - label: ":python: 3.5 / :postgres: 9.4"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py35.pg94.yaml

  - label: ":python: 3.5 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py35.pg95.yaml

  - label: ":python: 3.7 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py37.pg95.yaml

  - label: ":python: 3.7 / :postgres: 11"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py37.pg11.yaml
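The SQLite and lint steps run in plain `docker` containers, so any of them can be reproduced locally without Buildkite (a sketch; the bind mount stands in for the plugin's checkout):

```bash
docker run --rm -v "$PWD:/src" -w /src python:3.6 \
    bash -c 'python -m pip install tox && tox -e py36'
```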
.gitignore (vendored, 84 lines changed)

@@ -1,64 +1,36 @@
*.pyc
.*.swp
# filename patterns
*~
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock

.DS_Store
*.pyc
*.tac
_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
pip-wheel-metadata/

cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
/homeserver*.yaml
# stuff that is likely to exist when you run a server locally
/*.signing.key
/*.tls.crt
/*.tls.key
/uploads
/media_store/

*.signing.key
*.tls.crt
*.tls.dh
*.tls.key
# IDEs
/.idea/
/.ropeproject/
/.vscode/

.coverage*
coverage.*
!.coveragerc
htmlcov
# build products
/.coverage*
!/.coveragerc
/.tox
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/

demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc

uploads
cache

.idea/
media_store/

*.tac

build/
venv/
venv*/
*venv/

localhost-800*/
static/client/register/register_config.js
.tox

env/
*.config

.vscode/
.ropeproject/
*.deb
/debs
.travis.yml (deleted, 83 lines)

@@ -1,83 +0,0 @@
sudo: false
language: python

cache:
  directories:
    # we only bother to cache the wheels; parts of the http cache get
    # invalidated every build (because they get served with a max-age of 600
    # seconds), which means that we end up re-uploading the whole cache for
    # every build, which is time-consuming In any case, it's not obvious that
    # downloading the cache from S3 would be much faster than downloading the
    # originals from pypi.
    #
    - $HOME/.cache/pip/wheels

addons:
  postgresql: "9.4"

# don't clone the whole repo history, one commit will do
git:
  depth: 1

# only build branches we care about (PRs are built seperately)
branches:
  only:
    - master
    - develop
    - /^release-v/

# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
  fast_finish: true
  include:
    - python: 2.7
      env: TOX_ENV=packaging

    - python: 3.6
      env: TOX_ENV="pep8,check_isort"

    - python: 2.7
      env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"

    - python: 2.7
      env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"

    - python: 2.7
      env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - python: 3.5
      env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"

    - python: 3.6
      env: TOX_ENV=py36,codecov TRIAL_FLAGS="-j 2"

    - python: 3.6
      env: TOX_ENV=py36-postgres,codecov TRIAL_FLAGS="-j 4"
      services:
        - postgresql

    - # we only need to check for the newsfragment if it's a PR build
      if: type = pull_request
      python: 3.6
      env: TOX_ENV=check-newsfragment
      script:
        - git remote set-branches --add origin develop
        - git fetch origin develop
        - tox -e $TOX_ENV

install:
  - pip install tox

  # if we don't have python3.6 in this environment, travis unhelpfully gives us
  # a `python3.6` on our path which does nothing but spit out a warning. Tox
  # tries to run it (even if we're not running a py36 env), so the build logs
  # then have warnings which look like errors. To reduce the noise, remove the
  # non-functional python3.6.
  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)

script:
  - tox -e $TOX_ENV
CHANGES.md

@@ -1,3 +1,72 @@
Synapse 0.99.2 (2019-03-01)
===========================

Features
--------

- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))


Bugfixes
--------

- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
- The dependency checker now correctly reports a version mismatch for optional
  dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
- Fix an issue in the database migration script where the
  `e2e_room_keys.is_verified` column wasn't considered as
  a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))


Internal Changes
----------------

- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))


Synapse 0.99.1.1 (2019-02-14)
=============================

Bugfixes
--------

- Fix "TypeError: '>' not supported" when starting without an existing certificate.
  Fix a bug where an existing certificate would be reprovisoned every day. ([\#4648](https://github.com/matrix-org/synapse/issues/4648))


Synapse 0.99.1 (2019-02-14)
===========================
@@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please

@@ -74,16 +74,39 @@ entry. These are managed by Towncrier

To create a changelog entry, make a new file in the ``changelog.d``
file named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
deprecations), or ``misc`` (for internal-only changes).

The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.

Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when recieved over federation. Contributed by Jane
Matrix".
Matrix.".
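Creating the entry described above from a shell (a sketch; PR #1234 is the illustrative number used in the text):

```bash
# newsfragment for the hypothetical PR #1234; note the trailing full stop
cat > changelog.d/1234.bugfix <<'EOF'
The security levels of Florbs are now validated when recieved over federation. Contributed by Jane Matrix.
EOF
```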
Debian changelog
----------------

Changes which affect the debian packaging files (in ``debian``) are an
exception.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::

  dch

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)

If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

Attribution
~~~~~~~~~~~
INSTALL.md

@@ -358,26 +358,25 @@ For information on using a reverse proxy, see
[docs/reverse_proxy.rst](docs/reverse_proxy.rst).

To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`.
`homeserver.yaml`, as follows:

First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) and space at the start of
each line). The relevant lines are like this:
* First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

  ```
    - port: 8448
      type: http
      tls: true
      resources:
        - names: [client, federation]
  ```

You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You can either point
these settings at an existing certificate and key, or you can enable Synapse's
built-in ACME (Let's Encrypt) support. Instructions for having Synapse
automatically provision and renew federation certificates through ACME can be
found at [ACME.md](docs/ACME.md).
* You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You can either
  point these settings at an existing certificate and key, or you can
  enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
  for having Synapse automatically provision and renew federation
  certificates through ACME can be found at [ACME.md](docs/ACME.md).

## Registering a user
@@ -39,6 +39,7 @@ prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
prune .buildkite

exclude jenkins*
recursive-exclude jenkins *.sh
@@ -199,6 +199,8 @@ by installing the ``libjemalloc1`` package and adding this line to

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

Upgrading an existing Synapse
=============================
@@ -39,7 +39,7 @@ instructions that may be required are listed later in this document.

    ./synctl restart

To check whether your update was sucessful, you can check the Server header
To check whether your update was successful, you can check the Server header
returned by the Client-Server API:

.. code:: bash
changelog.d/4740.bugfix (new file, 1 line)
    'event_id' is now a required parameter in federated state requests, as per the matrix spec.

changelog.d/4749.bugfix (new file, 1 line)
    Fix tightloop over connecting to replication server.

changelog.d/4752.misc (new file, 1 line)
    Change from TravisCI to Buildkite for CI.

changelog.d/4757.feature (new file, 1 line)
    Move server key queries to federation reader.

changelog.d/4757.misc (new file, 1 line)
    When presence is disabled don't send over replication.

changelog.d/4759.feature (new file, 1 line)
    Add support for /account/3pid REST endpoint to client_reader worker.

changelog.d/4763.bugfix (new file, 1 line)
    Fix parsing of Content-Disposition headers on remote media requests and URL previews.

changelog.d/4765.misc (new file, 1 line)
    Minor docstring fixes for MatrixFederationAgent.

changelog.d/4770.misc (new file, 1 line)
    Optimise EDU transmission for the federation_sender worker.

changelog.d/4771.misc (new file, 1 line)
    Update test_typing to use HomeserverTestCase.

changelog.d/4776.bugfix (new file, 1 line)
    Fix incorrect log about not persisting duplicate state event.

changelog.d/4790.bugfix (new file, 1 line)
    Fix v4v6 option in HAProxy example config. Contributed by Flakebi.

changelog.d/4791.feature (new file, 1 line)
    Include a default configuration file in the 'docs' directory.

changelog.d/4797.misc (new file, 1 line)
    Clean up read-receipt handling.

changelog.d/4798.misc (new file, 1 line)
    Add some debug about processing read receipts.

changelog.d/4801.feature (new file, 1 line)
    Include a default configuration file in the 'docs' directory.
debian/changelog (vendored)

@@ -1,3 +1,16 @@
matrix-synapse-py3 (0.99.2) stable; urgency=medium

  * Fix overwriting of config settings on upgrade.
  * New synapse release 0.99.2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 01 Mar 2019 10:55:08 +0000

matrix-synapse-py3 (0.99.1.1) stable; urgency=medium

  * New synapse release 0.99.1.1

 -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 17:19:44 +0000

matrix-synapse-py3 (0.99.1) stable; urgency=medium

  [ Damjan Georgievski ]
debian/install (vendored)

@@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
debian/manage_debconf.pl (vendored, new executable file, 130 lines)

@@ -0,0 +1,130 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
#   manage_debconf.pl <action>
#
# where <action> can be:
#
#   read:    read the configuration from the yaml into debconf
#   update:  update the yaml config according to the debconf database
use strict;
use warnings;

use Debconf::Client::ConfModule (qw/get set/);

# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
    server_name => 'matrix-synapse/server-name',
    report_stats => 'matrix-synapse/report-stats',
);

# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};

sub read_config {
    my @files = @_;

    foreach my $file (@files) {
        print STDERR "reading $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        # rudimentary parsing which (a) avoids having to depend on a yaml library,
        # and (b) is tolerant of yaml errors
        while($_ = <$FH>) {
            while (my ($setting, $debconf) = each %MAPPINGS) {
                $setting = quotemeta $setting;
                if(/^${setting}\s*:(.*)$/) {
                    my $val = $1;

                    # remove leading/trailing whitespace
                    $val =~ s/^\s*//;
                    $val =~ s/\s*$//;

                    # remove surrounding quotes
                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
                        $val = $1;
                    }

                    print STDERR ">> $debconf = $val\n" if $DEBUG;
                    set($debconf, $val);
                }
            }
        }
        close $FH;
    }
}

sub update_config {
    my @files = @_;

    my %substs = ();
    while (my ($setting, $debconf) = each %MAPPINGS) {
        my @res = get($debconf);
        $substs{$setting} = $res[1] if $res[0] == 0;
    }

    foreach my $file (@files) {
        print STDERR "checking $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        my $updated = 0;

        # read the whole file into memory
        my @lines = <$FH>;

        while (my ($setting, $val) = each %substs) {
            $setting = quotemeta $setting;

            map {
                if (/^${setting}\s*:\s*(.*)\s*$/) {
                    my $current = $1;
                    if ($val ne $current) {
                        $_ = "${setting}: $val\n";
                        $updated = 1;
                    }
                }
            } @lines;
        }
        close $FH;

        next unless $updated;

        print STDERR "updating $file\n" if $DEBUG;
        open $FH, ">", $file or die "unable to update $file";
        print $FH @lines;
        close $FH;
    }
}


my $cmd = $ARGV[0];

my $read = 0;
my $update = 0;

if (not $cmd) {
    die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
    $read = 1;
} elsif ($cmd eq 'update') {
    $update = 1;
} else {
    die "unknown command '$cmd'\n";
}

my @files = (
    "/etc/matrix-synapse/homeserver.yaml",
    glob("/etc/matrix-synapse/conf.d/*.yaml"),
);

if ($read) {
    read_config(@files);
} elsif ($update) {
    update_config(@files);
}
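Run by hand, the two actions look like this (a sketch; the path is where `debian/install` above places the script, and `DPKG_MAINTSCRIPT_DEBUG` enables the script's debug output):

```bash
# yaml config files -> debconf database
DPKG_MAINTSCRIPT_DEBUG=1 /opt/venvs/matrix-synapse/lib/manage_debconf.pl read

# debconf database -> yaml config files
DPKG_MAINTSCRIPT_DEBUG=1 /opt/venvs/matrix-synapse/lib/manage_debconf.pl update
```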
@@ -4,6 +4,9 @@ set -e

. /usr/share/debconf/confmodule

# try to update the debconf db according to whatever is in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true

db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go
debian/matrix-synapse-py3.postinst (vendored)

@@ -8,19 +8,36 @@ USER="matrix-synapse"

case "$1" in
  configure|reconfigure)
    # Set server name in config file

    # generate template config files if they don't exist
    mkdir -p "/etc/matrix-synapse/conf.d/"
    db_get matrix-synapse/server-name
    if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
      cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

    if [ "$RET" ]; then
      echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
    fi

    db_get matrix-synapse/report-stats
    if [ "$RET" ]; then
      echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
    if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
      cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
    fi

    # update the config files according to whatever is in the debconf database
    /opt/venvs/matrix-synapse/lib/manage_debconf.pl update

    if ! getent passwd $USER >/dev/null; then
      adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
    fi
demo/.gitignore (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
*.db
*.log
*.log.*
*.pid

/media_store.*
/etc
@@ -58,7 +58,11 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    sqlite3

COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
RUN apt-get install -yq /dh-virtualenv_1.1-1_all.deb

# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
    && apt-get install -yq /dh-virtualenv_1.1-1_all.deb

WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]
docs/.sample_config_header.yaml (new file, 12 lines)

@@ -0,0 +1,12 @@
# The config is maintained as an up-to-date snapshot of the default
# homeserver.yaml configuration generated by Synapse.
#
# It is intended to act as a reference for the default configuration,
# helping admins keep track of new options and other changes, and compare
# their configs with the current default. As such, many of the actual
# config values shown are placeholders.
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in INSTALL.md.
docs/ACME.md

@@ -10,13 +10,14 @@ through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.

In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
used between client and federation ports without issue.

For a sample configuration, please inspect the new ACME section in the example
generated config by running the `generate-config` executable. For example:
If your configuration file does not already have an `acme` section, you can
generate an example config by running the `generate_config` executable. For
example:

```
~/synapse/env3/bin/generate-config
~/synapse/env3/bin/generate_config
```

@@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
(Do not run Synapse with root permissions!) Detailed instructions are
available under "ACME setup" below.

If you are already using self-signed certificates, you will need to back up
or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
Synapse's root directory), Synapse's ACME implementation will not overwrite
them.
If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.

You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you

@@ -87,7 +87,6 @@ acme:
    port: 8009
```

#### Authbind

`authbind` allows a program which does not run as root to bind to

@@ -127,4 +126,4 @@ acme:

Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.

Finally, start/restart Synapse.
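For reference, a typical `authbind` setup for port 80 looks something like this (a sketch; the byport file convention is authbind's, and the synctl invocation depends on how Synapse is installed):

```bash
# allow non-root processes to bind port 80
sudo touch /etc/authbind/byport/80
sudo chmod 777 /etc/authbind/byport/80

# start Synapse under authbind so the ACME listener can bind port 80
authbind --deep synctl start
```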
@@ -125,7 +125,7 @@ doing one of the following:

* Use Synapse's [ACME support](./ACME.md), and forward port 80 on the
  `server_name` domain to your Synapse instance.

### Option 2: run Synapse behind a reverse proxy
#### Option 2: run Synapse behind a reverse proxy

If you have an existing reverse proxy set up with correct TLS certificates for
your domain, you can simply route all traffic through the reverse proxy by
@@ -79,12 +79,28 @@ Let's assume that we expect clients to connect to our server at
      SSLEngine on
      ServerName example.com;

      <Location />
      <Location /_matrix>
          ProxyPass http://127.0.0.1:8008/_matrix nocanon
          ProxyPassReverse http://127.0.0.1:8008/_matrix
      </Location>
  </VirtualHost>

* HAProxy::

    frontend https
      bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1

      # Matrix client traffic
      acl matrix hdr(host) -i matrix.example.com
      use_backend matrix if matrix

    frontend matrix-federation
      bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
      default_backend matrix

    backend matrix
      server matrix 127.0.0.1:8008

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
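A quick smoke test for such a proxy setup (a sketch; `matrix.example.com` is the illustrative hostname from the config above):

```bash
# client traffic through port 443
curl https://matrix.example.com/_matrix/client/versions

# federation traffic through port 8448
curl https://matrix.example.com:8448/_matrix/federation/v1/version
```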
docs/sample_config.yaml (new file, 1046 lines)

File diff suppressed because it is too large.
@@ -137,7 +136,6 @@ for each stream so that on reconneciton it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.

Example
~~~~~~~

@@ -189,7 +188,9 @@ RDATA (S)
    A single update in a stream

POSITION (S)
    The position of the stream has been updated
    The position of the stream has been updated. Sent to the client after all
    missing updates for a stream have been sent to the client and they're now
    up to date.

ERROR (S, C)
    There was an error

@@ -221,3 +222,28 @@ SYNC (S, C)

See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.


Cache Invalidation Stream
~~~~~~~~~~~~~~~~~~~~~~~~~

The cache invalidation stream is used to inform workers when they need to
invalidate any of their caches in the data store. This is done by streaming all
cache invalidations done on master down to the workers, assuming that any caches
on the workers also exist on the master.

Each individual cache invalidation results in a row being sent down replication,
which includes the cache name (the name of the function) and they key to
invalidate. For example::

    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

However, there are times when a number of caches need to be invalidated at the
same time with the same key. To reduce traffic we batch those invalidations into
a single poke by defining a special cache name that workers understand to mean
to expand to invalidate the correct caches.

Currently the special cache names are declared in ``synapse/storage/_base.py``
and are:

1. ``cs_cache_fake`` ─ invalidates caches that depend on the current state
@@ -182,6 +182,7 @@ endpoints matching the following regular expressions::

    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/send/
    ^/_matrix/key/v2/query

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

@@ -222,6 +223,14 @@ following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
    ^/_matrix/client/(api/v1|r0|unstable)/login$
    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::

    ^/_matrix/client/(r0|unstable)/register$


``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~
scripts-dev/check-newsfragment (new executable file, 41 lines)

@@ -0,0 +1,41 @@
#!/bin/bash
#
# A script which checks that an appropriate news file has been added on this
# branch.

set -e

# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch --depth=1 origin develop

UPSTREAM=origin/develop

# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet $UPSTREAM... -- debian; then
    if git diff --quiet $UPSTREAM... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        exit 1
    fi
fi

# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
    tox -e check-newsfragment
fi

echo
echo "--------------------------"
echo

# check that any new newsfiles on this branch end with a full stop.
for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
    lastchar=`tr -d '\n' < $f | tail -c 1`
    if [ $lastchar != '.' ]; then
        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
        exit 1
    fi
done
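CI invokes this via the ":newspaper: Newsfile" pipeline step; it can also be run by hand from a feature branch (a sketch; it needs a git checkout with an `origin` remote and `tox` on the path):

```bash
git checkout my-feature-branch   # hypothetical branch name
scripts-dev/check-newsfragment
```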
scripts-dev/generate_sample_config (new executable file, 18 lines)

@@ -0,0 +1,18 @@
#!/bin/bash
#
# Update/check the docs/sample_config.yaml

set -e

cd `dirname $0`/..

SAMPLE_CONFIG="docs/sample_config.yaml"

if [ "$1" == "--check" ]; then
    diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
        exit 1
    }
else
    ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
fi
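Typical usage, matching the two branches of the script:

```bash
# regenerate docs/sample_config.yaml in place
scripts-dev/generate_sample_config

# verify it only, as the check-sampleconfig CI step does; exits non-zero if stale
scripts-dev/generate_sample_config --check
```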
@@ -1,6 +1,7 @@
#!/usr/bin/env python

import argparse
import shutil
import sys

from synapse.config.homeserver import HomeServerConfig

@@ -50,6 +51,13 @@ if __name__ == "__main__":
        help="File to write the configuration to. Default: stdout",
    )

    parser.add_argument(
        "--header-file",
        type=argparse.FileType('r'),
        help="File from which to read a header, which will be printed before the "
        "generated config.",
    )

    args = parser.parse_args()

    report_stats = args.report_stats

@@ -64,4 +72,7 @@ if __name__ == "__main__":
        report_stats=report_stats,
    )

    if args.header_file:
        shutil.copyfileobj(args.header_file, args.output_file)

    args.output_file.write(conf)
@@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
    "group_summary_users": ["is_public"],
    "group_roles": ["is_public"],
    "local_group_membership": ["is_publicised", "is_admin"],
    "e2e_room_keys": ["is_verified"],
}
@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.1"
__version__ = "0.99.2"
@@ -25,10 +25,12 @@ from daemonize import Daemonize
from twisted.internet import error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)

@@ -270,9 +272,37 @@ def start(hs, listeners=None):
        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().start_profiling()

        setup_sentry(hs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
        if reactor.running:
            reactor.stop()
        sys.exit(1)


def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """

    if not hs.config.sentry_enabled:
        return

    import sentry_sdk
    sentry_sdk.init(
        dsn=hs.config.sentry_dsn,
        release=get_version_string(synapse),
    )

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.config.worker_name if hs.config.worker_name else "master"
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||
from synapse.rest.client.v1.login import LoginRestServlet
|
||||
from synapse.rest.client.v1.room import (
|
||||
JoinedRoomMemberListRestServlet,
|
||||
PublicRoomListRestServlet,
|
||||
@@ -47,6 +48,8 @@ from synapse.rest.client.v1.room import (
|
||||
RoomMemberListRestServlet,
|
||||
RoomStateRestServlet,
|
||||
)
|
||||
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
|
||||
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
@@ -92,6 +95,9 @@ class ClientReaderServer(HomeServer):
|
||||
JoinedRoomMemberListRestServlet(self).register(resource)
|
||||
RoomStateRestServlet(self).register(resource)
|
||||
RoomEventContextServlet(self).register(resource)
|
||||
RegisterRestServlet(self).register(resource)
|
||||
LoginRestServlet(self).register(resource)
|
||||
ThreepidRestServlet(self).register(resource)
|
||||
|
||||
resources.update({
|
||||
"/_matrix/client/r0": resource,
|
||||
|
||||
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig

@@ -40,9 +40,11 @@ from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree

@@ -62,6 +64,7 @@ class FederationReaderSlavedStore(
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    SlavedRegistrationStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,

@@ -97,6 +100,9 @@ class FederationReaderServer(HomeServer):
                ),
            })

            if name in ["keys", "federation"]:
                resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
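With the `keys` resource enabled on a federation_reader listener, server-key requests can be answered by the worker directly (a sketch; port 8083 for the worker listener is an assumption):

```bash
curl -s http://localhost:8083/_matrix/key/v2/server
```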
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig

@@ -66,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
        headers = {
            "Authorization": auth_headers,
        }

        result = yield self.http_client.get_json(
            self.main_uri + request.uri.decode('ascii'),
            headers=headers,
        )

        try:
            result = yield self.http_client.get_json(
                self.main_uri + request.uri.decode('ascii'),
                headers=headers,
            )
        except HttpResponseException as e:
            raise e.to_synapse_error()

        defer.returnValue((200, result))

    @defer.inlineCallbacks
```diff
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,11 +15,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
+
 import gc
 import logging
 import os
 import sys
 import traceback

 from six import iteritems

@@ -27,6 +29,7 @@ from prometheus_client import Gauge

 from twisted.application import service
 from twisted.internet import defer, reactor
+from twisted.python.failure import Failure
 from twisted.web.resource import EncodingResourceWrapper, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File
@@ -394,10 +397,10 @@ def setup(config_options):
         # is less than our re-registration threshold.
         provision = False

-        if (cert_days_remaining is None):
-            provision = True
-
-        if cert_days_remaining > hs.config.acme_reprovision_threshold:
+        if (
+            cert_days_remaining is None or
+            cert_days_remaining < hs.config.acme_reprovision_threshold
+        ):
             provision = True

         if provision:
```
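The hunk above is the substance of the fix: under the old logic a certificate was only reprovisioned when *more* days remained than the threshold, so a certificate approaching expiry was never renewed. A minimal standalone sketch of the corrected check (names follow the diff; the 30-day default is the `reprovision_threshold` default that appears later in this compare):

```python
def should_provision(cert_days_remaining, reprovision_threshold=30):
    """Sketch of the fixed ACME check: provision when no certificate
    exists yet, or when fewer days remain than the threshold."""
    return (
        cert_days_remaining is None
        or cert_days_remaining < reprovision_threshold
    )

assert should_provision(None)      # no certificate yet
assert should_provision(10)        # within the 30-day threshold
assert not should_provision(60)    # plenty of validity left
```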
```diff
@@ -438,7 +441,11 @@ def setup(config_options):
             hs.get_datastore().start_doing_background_updates()
         except Exception:
             # Print the exception and bail out.
-            traceback.print_exc(file=sys.stderr)
+            print("Error during startup:", file=sys.stderr)
+
+            # this gives better tracebacks than traceback.print_exc()
+            Failure().printTraceback(file=sys.stderr)
+
             if reactor.running:
                 reactor.stop()
             sys.exit(1)
@@ -548,6 +555,9 @@ def run(hs):
             stats["memory_rss"] += process.memory_info().rss
             stats["cpu_average"] += int(process.cpu_percent(interval=None))

+        stats["database_engine"] = hs.get_datastore().database_engine_name
+        stats["database_server_version"] = hs.get_datastore().get_server_version()
+
         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
             yield hs.get_simple_http_client().put_json(
```
```diff
@@ -180,9 +180,7 @@ class Config(object):
         Returns:
             str: the yaml config file
         """
-        default_config = "# vim:ft=yaml\n"
-
-        default_config += "\n\n".join(
+        default_config = "\n\n".join(
             dedent(conf)
             for conf in self.invoke_all(
                 "default_config",
@@ -297,19 +295,26 @@ class Config(object):
                     "Must specify a server_name to a generate config for."
                     " Pass -H server.name."
                 )

+            config_str = obj.generate_config(
+                config_dir_path=config_dir_path,
+                data_dir_path=os.getcwd(),
+                server_name=server_name,
+                report_stats=(config_args.report_stats == "yes"),
+                generate_secrets=True,
+            )
+
             if not cls.path_exists(config_dir_path):
                 os.makedirs(config_dir_path)
             with open(config_path, "w") as config_file:
-                config_str = obj.generate_config(
-                    config_dir_path=config_dir_path,
-                    data_dir_path=os.getcwd(),
-                    server_name=server_name,
-                    report_stats=(config_args.report_stats == "yes"),
-                    generate_secrets=True,
-                )
-                config = yaml.load(config_str)
-                obj.invoke_all("generate_files", config)
+                config_file.write(
+                    "# vim:ft=yaml\n\n"
+                )
                 config_file.write(config_str)

+            config = yaml.load(config_str)
+            obj.invoke_all("generate_files", config)
+
             print(
                 (
                     "A config file has been generated in %r for server name"
```
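The reordering above moves config generation out of the `with open(...)` block: the YAML string is built first, the file writer prepends the `# vim:ft=yaml` modeline (previously baked into the generated string itself), and `generate_files` runs on the parsed result. A hedged sketch of the new flow, with `generate_config_str` standing in for the real `obj.generate_config` call:

```python
import yaml

def write_generated_config(config_path, generate_config_str):
    """Sketch of the reordered flow: generate first, then write the
    modeline plus the config, then parse the string for file generation.
    `generate_config_str` is a hypothetical stand-in callable."""
    config_str = generate_config_str()

    with open(config_path, "w") as config_file:
        config_file.write("# vim:ft=yaml\n\n")
        config_file.write(config_str)

    config = yaml.load(config_str)  # PyYAML of the era; use safe_load today
    return config  # handed on to invoke_all("generate_files", config)
```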
```diff
@@ -33,6 +33,7 @@ class ApiConfig(Config):
         ## API Configuration ##

         # A list of event types that will be included in the room_invite_state
+        #
         room_invite_state_types:
             - "{JoinRules}"
             - "{CanonicalAlias}"

@@ -38,10 +38,12 @@ class AppServiceConfig(Config):
     def default_config(cls, **kwargs):
         return """\
         # A list of application service config file to use
+        #
         app_service_config_files: []

         # Whether or not to track application service IP addresses. Implicitly
         # enables MAU tracking for application service users.
+        #
         track_appservice_user_ips: False
         """
```
```diff
@@ -30,19 +30,22 @@ class CaptchaConfig(Config):
         # See docs/CAPTCHA_SETUP for full details of configuring this.

         # This Home Server's ReCAPTCHA public key.
+        #
         recaptcha_public_key: "YOUR_PUBLIC_KEY"

         # This Home Server's ReCAPTCHA private key.
+        #
         recaptcha_private_key: "YOUR_PRIVATE_KEY"

         # Enables ReCaptcha checks when registering, preventing signup
         # unless a captcha is answered. Requires a valid ReCaptcha
         # public/private key.
+        #
         enable_registration_captcha: False

         # A secret key used to bypass the captcha test entirely.
         #captcha_bypass_secret: "YOUR_SECRET_HERE"

         # The API endpoint to use for verifying m.login.recaptcha responses.
-        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
         """
```
```diff
@@ -38,6 +38,7 @@ class CasConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable CAS for registration and login.
+        #
         #cas_config:
         #   enabled: true
         #   server_url: "https://cas-server.com"

@@ -54,20 +54,20 @@ DEFAULT_CONFIG = """\
 # for an account. Has no effect unless `require_at_registration` is enabled.
 # Defaults to "Privacy Policy".
 #
-# user_consent:
-#   template_dir: res/templates/privacy
-#   version: 1.0
-#   server_notice_content:
-#     msgtype: m.text
-#     body: >-
-#       To continue using this homeserver you must review and agree to the
-#       terms and conditions at %(consent_uri)s
-#   send_server_notice_to_guests: True
-#   block_events_error: >-
-#     To continue using this homeserver you must review and agree to the
-#     terms and conditions at %(consent_uri)s
-#   require_at_registration: False
-#   policy_name: Privacy Policy
+#user_consent:
+#  template_dir: res/templates/privacy
+#  version: 1.0
+#  server_notice_content:
+#    msgtype: m.text
+#    body: >-
+#      To continue using this homeserver you must review and agree to the
+#      terms and conditions at %(consent_uri)s
+#  send_server_notice_to_guests: True
+#  block_events_error: >-
+#    To continue using this homeserver you must review and agree to the
+#    terms and conditions at %(consent_uri)s
+#  require_at_registration: False
+#  policy_name: Privacy Policy
 #
 """

@@ -49,7 +49,8 @@ class DatabaseConfig(Config):
     def default_config(self, data_dir_path, **kwargs):
         database_path = os.path.join(data_dir_path, "homeserver.db")
         return """\
-        # Database configuration
+        ## Database ##
+
         database:
             # The database engine name
             name: "sqlite3"

@@ -24,9 +24,11 @@ class GroupsConfig(Config):
     def default_config(self, **kwargs):
         return """\
         # Whether to allow non server admins to create groups on this server
+        #
         enable_group_creation: false

         # If enabled, non server admins can only create groups with local parts
         # starting with this prefix
-        # group_creation_prefix: "unofficial/"
+        #
+        #group_creation_prefix: "unofficial/"
         """

@@ -46,8 +46,8 @@ class JWTConfig(Config):
         return """\
         # The JWT needs to contain a globally unique "sub" (subject) claim.
         #
-        # jwt_config:
-        #    enabled: true
-        #    secret: "a secret"
-        #    algorithm: "HS256"
+        #jwt_config:
+        #   enabled: true
+        #   secret: "a secret"
+        #   algorithm: "HS256"
         """
```
```diff
@@ -40,7 +40,7 @@ class KeyConfig(Config):
     def read_config(self, config):
         self.signing_key = self.read_signing_key(config["signing_key_path"])
         self.old_signing_keys = self.read_old_signing_keys(
-            config["old_signing_keys"]
+            config.get("old_signing_keys", {})
         )
         self.key_refresh_interval = self.parse_duration(
             config["key_refresh_interval"]
@@ -56,7 +56,7 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing missing macaroon_secret_key")
+            logger.warn("Config is missing macaroon_secret_key")
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()
```
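For reference, a small sketch of the fallback shown above: when `macaroon_secret_key` is unset, a 32-byte key is derived by hashing the signing key's seed. The `signing_key_seed` argument here is a stand-in for `bytes(self.signing_key[0])`:

```python
import hashlib

def derive_macaroon_secret_key(signing_key_seed):
    """Sketch of the derivation above: SHA-256 of the signing key's
    seed bytes yields a stable 32-byte secret."""
    return hashlib.sha256(bytes(signing_key_seed)).digest()

secret = derive_macaroon_secret_key(b"example seed bytes")
assert len(secret) == 32
```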
```diff
@@ -83,24 +83,29 @@ class KeyConfig(Config):
         # a secret which is used to sign access tokens. If none is specified,
         # the registration_shared_secret is used, if one is given; otherwise,
         # a secret key is derived from the signing key.
+        #
         %(macaroon_secret_key)s

         # Used to enable access token expiration.
+        #
         expire_access_token: False

         # a secret which is used to calculate HMACs for form values, to stop
         # falsification of values. Must be specified for the User Consent
         # forms to work.
+        #
         %(form_secret)s

         ## Signing Keys ##

         # Path to the signing key to sign messages with
+        #
         signing_key_path: "%(base_key_name)s.signing.key"

         # The keys that the server used to sign messages with but won't use
         # to sign new messages. E.g. it has lost its private key
-        old_signing_keys: {}
+        #
+        #old_signing_keys:
+        #  "ed25519:auto":
+        #    # Base64 encoded public key
+        #    key: "The public part of your old signing key."
@@ -111,9 +116,11 @@ class KeyConfig(Config):
         # Used to set the valid_until_ts in /key/v2 APIs.
         # Determines how quickly servers will query to check which keys
         # are still valid.
+        #
         key_refresh_interval: "1d" # 1 Day.

         # The trusted servers to download signing keys from.
+        #
         perspectives:
             servers:
                 "matrix.org":
```
```diff
@@ -81,8 +81,11 @@ class LoggingConfig(Config):

     def default_config(self, config_dir_path, server_name, **kwargs):
         log_config = os.path.join(config_dir_path, server_name + ".log.config")
-        return """
+        return """\
+        ## Logging ##
+
         # A yaml python logging config file
+        #
         log_config: "%(log_config)s"
         """ % locals()

@@ -242,3 +245,5 @@ def setup_logging(config, use_worker_options=False):
         [_log],
         redirectStandardIO=not config.no_redirect_stdio,
     )
+    if not config.no_redirect_stdio:
+        print("Redirected stdout/stderr to logs")
```
```diff
@@ -13,7 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ._base import Config
+from ._base import Config, ConfigError
+
+MISSING_SENTRY = (
+    """Missing sentry-sdk library. This is required to enable sentry
+    integration.
+    """
+)


 class MetricsConfig(Config):
@@ -23,12 +29,38 @@ class MetricsConfig(Config):
         self.metrics_port = config.get("metrics_port")
         self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")

+        self.sentry_enabled = "sentry" in config
+        if self.sentry_enabled:
+            try:
+                import sentry_sdk  # noqa F401
+            except ImportError:
+                raise ConfigError(MISSING_SENTRY)
+
+            self.sentry_dsn = config["sentry"].get("dsn")
+            if not self.sentry_dsn:
+                raise ConfigError(
+                    "sentry.dsn field is required when sentry integration is enabled",
+                )
+
     def default_config(self, report_stats=None, **kwargs):
         res = """\
         ## Metrics ###

         # Enable collection and rendering of performance metrics
+        #
         enable_metrics: False
+
+        # Enable sentry integration
+        # NOTE: While attempts are made to ensure that the logs don't contain
+        # any sensitive information, this cannot be guaranteed. By enabling
+        # this option the sentry server may therefore receive sensitive
+        # information, and it in turn may then diseminate sensitive information
+        # through insecure notification channels if so configured.
+        #
+        #sentry:
+        #    dsn: "..."

         # Whether or not to report anonymized homeserver usage statistics.
         """

         if report_stats is None:
```
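A condensed sketch of the validation the hunk above adds: the mere presence of a `sentry` section enables the integration, the SDK must be importable, and `dsn` becomes mandatory. Written here as a standalone function rather than the real `MetricsConfig.read_config`:

```python
class ConfigError(Exception):
    pass

def read_sentry_config(config):
    """Sketch of the checks above; returns the DSN, or None when no
    sentry section is configured at all."""
    if "sentry" not in config:
        return None
    try:
        import sentry_sdk  # noqa: F401
    except ImportError:
        raise ConfigError("Missing sentry-sdk library.")
    dsn = config["sentry"].get("dsn")
    if not dsn:
        raise ConfigError(
            "sentry.dsn field is required when sentry integration is enabled",
        )
    return dsn

assert read_sentry_config({}) is None
```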
```diff
@@ -28,6 +28,7 @@ class PasswordConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable password for login.
+        #
         password_config:
            enabled: true
            # Uncomment and change to a secret random string for extra security.

@@ -52,18 +52,18 @@ class PasswordAuthProviderConfig(Config):

     def default_config(self, **kwargs):
         return """\
-        # password_providers:
-        #     - module: "ldap_auth_provider.LdapAuthProvider"
-        #       config:
-        #         enabled: true
-        #         uri: "ldap://ldap.example.com:389"
-        #         start_tls: true
-        #         base: "ou=users,dc=example,dc=com"
-        #         attributes:
-        #            uid: "cn"
-        #            mail: "email"
-        #            name: "givenName"
-        #         #bind_dn:
-        #         #bind_password:
-        #         #filter: "(objectClass=posixAccount)"
+        #password_providers:
+        #    - module: "ldap_auth_provider.LdapAuthProvider"
+        #      config:
+        #        enabled: true
+        #        uri: "ldap://ldap.example.com:389"
+        #        start_tls: true
+        #        base: "ou=users,dc=example,dc=com"
+        #        attributes:
+        #           uid: "cn"
+        #           mail: "email"
+        #           name: "givenName"
+        #        #bind_dn:
+        #        #bind_password:
+        #        #filter: "(objectClass=posixAccount)"
         """
```
```diff
@@ -51,11 +51,11 @@ class PushConfig(Config):
         # notification request includes the content of the event (other details
         # like the sender are still included). For `event_id_only` push, it
         # has no effect.
-
+        #
         # For modern android devices the notification content will still appear
         # because it is loaded by the app. iPhone, however will send a
         # notification saying only that a message arrived and who it came from.
         #
         #push:
-        #   include_content: true
+        #  include_content: true
         """

@@ -32,27 +32,34 @@ class RatelimitConfig(Config):
         ## Ratelimiting ##

         # Number of messages a client can send per second
+        #
         rc_messages_per_second: 0.2

         # Number of message a client can send before being throttled
+        #
         rc_message_burst_count: 10.0

         # The federation window size in milliseconds
+        #
         federation_rc_window_size: 1000

         # The number of federation requests from a single server in a window
         # before the server will delay processing the request.
+        #
         federation_rc_sleep_limit: 10

         # The duration in milliseconds to delay processing events from
         # remote servers by if they go over the sleep limit.
+        #
         federation_rc_sleep_delay: 500

         # The maximum number of concurrent federation requests allowed
         # from a single server
+        #
         federation_rc_reject_limit: 50

         # The number of federation requests to concurrently process from a
         # single server
+        #
         federation_rc_concurrent: 3
         """
```
```diff
@@ -70,28 +70,29 @@ class RegistrationConfig(Config):

         # The user must provide all of the below types of 3PID when registering.
         #
-        # registrations_require_3pid:
-        #     - email
-        #     - msisdn
+        #registrations_require_3pid:
+        #  - email
+        #  - msisdn

         # Explicitly disable asking for MSISDNs from the registration
         # flow (overrides registrations_require_3pid if MSISDNs are set as required)
-        # disable_msisdn_registration = True
+        #
+        #disable_msisdn_registration: True

         # Mandate that users are only allowed to associate certain formats of
         # 3PIDs with accounts on this server.
         #
-        # allowed_local_3pids:
-        #     - medium: email
-        #       pattern: '.*@matrix\\.org'
-        #     - medium: email
-        #       pattern: '.*@vector\\.im'
-        #     - medium: msisdn
-        #       pattern: '\\+44'
+        #allowed_local_3pids:
+        #  - medium: email
+        #    pattern: '.*@matrix\\.org'
+        #  - medium: email
+        #    pattern: '.*@vector\\.im'
+        #  - medium: msisdn
+        #    pattern: '\\+44'

         # If set, allows registration by anyone who also has the shared
         # secret, even if registration is otherwise disabled.
+        #
         %(registration_shared_secret)s

         # Set the number of bcrypt rounds used to generate password hash.
@@ -99,11 +100,13 @@ class RegistrationConfig(Config):
         # The default number is 12 (which equates to 2^12 rounds).
         # N.B. that increasing this will exponentially increase the time required
         # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
+        #
         bcrypt_rounds: 12

         # Allows users to register as guests without a password/email/etc, and
         # participate in rooms hosted on this server which have been made
         # accessible to anonymous users.
+        #
         allow_guest_access: False

         # The identity server which we suggest that clients should use when users log
@@ -112,27 +115,30 @@ class RegistrationConfig(Config):
         # (By default, no suggestion is made, so it is left up to the client.
         # This setting is ignored unless public_baseurl is also set.)
         #
-        # default_identity_server: https://matrix.org
+        #default_identity_server: https://matrix.org

         # The list of identity servers trusted to verify third party
         # identifiers by this server.
         #
         # Also defines the ID server which will be called when an account is
         # deactivated (one will be picked arbitrarily).
+        #
         trusted_third_party_id_servers:
-            - matrix.org
-            - vector.im
+          - matrix.org
+          - vector.im

         # Users who register on this homeserver will automatically be joined
         # to these rooms
+        #
         #auto_join_rooms:
-        #   - "#example:example.com"
+        #  - "#example:example.com"

         # Where auto_join_rooms are specified, setting this flag ensures that the
         # the rooms exist by creating them when the first user on the
         # homeserver registers.
         # Setting to false means that if the rooms are not manually created,
         # users cannot be auto-joined since they do not exist.
+        #
         autocreate_auto_join_rooms: true
         """ % locals()
```
```diff
@@ -180,29 +180,34 @@ class ContentRepositoryConfig(Config):
         uploads_path = os.path.join(data_dir_path, "uploads")
         return r"""
         # Directory where uploaded images and attachments are stored.
+        #
         media_store_path: "%(media_store)s"

         # Media storage providers allow media to be stored in different
         # locations.
-        # media_storage_providers:
-        # - module: file_system
-        #   # Whether to write new local files.
-        #   store_local: false
-        #   # Whether to write new remote media
-        #   store_remote: false
-        #   # Whether to block upload requests waiting for write to this
-        #   # provider to complete
-        #   store_synchronous: false
-        #   config:
-        #     directory: /mnt/some/other/directory
+        #
+        #media_storage_providers:
+        #  - module: file_system
+        #    # Whether to write new local files.
+        #    store_local: false
+        #    # Whether to write new remote media
+        #    store_remote: false
+        #    # Whether to block upload requests waiting for write to this
+        #    # provider to complete
+        #    store_synchronous: false
+        #    config:
+        #       directory: /mnt/some/other/directory

         # Directory where in-progress uploads are stored.
+        #
         uploads_path: "%(uploads_path)s"

         # The largest allowed upload size in bytes
+        #
         max_upload_size: "10M"

         # Maximum number of pixels that will be thumbnailed
+        #
         max_image_pixels: "32M"

         # Whether to generate new thumbnails on the fly to precisely match
@@ -210,9 +215,11 @@ class ContentRepositoryConfig(Config):
         # a new resolution is requested by the client the server will
         # generate a new thumbnail. If false the server will pick a thumbnail
         # from a precalculated list.
+        #
         dynamic_thumbnails: false

-        # List of thumbnail to precalculate when an image is uploaded.
+        # List of thumbnails to precalculate when an image is uploaded.
+        #
         thumbnail_sizes:
           - width: 32
             height: 32
@@ -233,6 +240,7 @@ class ContentRepositoryConfig(Config):
         # Is the preview URL API enabled? If enabled, you *must* specify
         # an explicit url_preview_ip_range_blacklist of IPs that the spider is
         # denied from accessing.
+        #
         url_preview_enabled: False

         # List of IP address CIDR ranges that the URL preview spider is denied
@@ -243,16 +251,16 @@ class ContentRepositoryConfig(Config):
         # synapse to issue arbitrary GET requests to your internal services,
         # causing serious security issues.
         #
-        # url_preview_ip_range_blacklist:
-        # - '127.0.0.0/8'
-        # - '10.0.0.0/8'
-        # - '172.16.0.0/12'
-        # - '192.168.0.0/16'
-        # - '100.64.0.0/10'
-        # - '169.254.0.0/16'
-        # - '::1/128'
-        # - 'fe80::/64'
-        # - 'fc00::/7'
+        #url_preview_ip_range_blacklist:
+        #  - '127.0.0.0/8'
+        #  - '10.0.0.0/8'
+        #  - '172.16.0.0/12'
+        #  - '192.168.0.0/16'
+        #  - '100.64.0.0/10'
+        #  - '169.254.0.0/16'
+        #  - '::1/128'
+        #  - 'fe80::/64'
+        #  - 'fc00::/7'
+        #
         # List of IP address CIDR ranges that the URL preview spider is allowed
         # to access even if they are specified in url_preview_ip_range_blacklist.
@@ -260,8 +268,8 @@ class ContentRepositoryConfig(Config):
         # target IP ranges - e.g. for enabling URL previews for a specific private
         # website only visible in your network.
         #
-        # url_preview_ip_range_whitelist:
-        # - '192.168.1.1'
+        #url_preview_ip_range_whitelist:
+        #   - '192.168.1.1'

         # Optional list of URL matches that the URL preview spider is
         # denied from accessing. You should use url_preview_ip_range_blacklist
@@ -279,26 +287,25 @@ class ContentRepositoryConfig(Config):
         # specified component matches for a given list item succeed, the URL is
         # blacklisted.
         #
-        # url_preview_url_blacklist:
-        # # blacklist any URL with a username in its URI
-        # - username: '*'
+        #url_preview_url_blacklist:
+        #  # blacklist any URL with a username in its URI
+        #  - username: '*'
+        #
-        # # blacklist all *.google.com URLs
-        # - netloc: 'google.com'
-        # - netloc: '*.google.com'
+        #  # blacklist all *.google.com URLs
+        #  - netloc: 'google.com'
+        #  - netloc: '*.google.com'
+        #
-        # # blacklist all plain HTTP URLs
-        # - scheme: 'http'
+        #  # blacklist all plain HTTP URLs
+        #  - scheme: 'http'
+        #
-        # # blacklist http(s)://www.acme.com/foo
-        # - netloc: 'www.acme.com'
-        #   path: '/foo'
+        #  # blacklist http(s)://www.acme.com/foo
+        #  - netloc: 'www.acme.com'
+        #    path: '/foo'
+        #
-        # # blacklist any URL with a literal IPv4 address
-        # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
+        #  # blacklist any URL with a literal IPv4 address
+        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

         # The largest allowed URL preview spidering size in bytes
         max_spider_size: "10M"

         """ % locals()
```
```diff
@@ -20,12 +20,37 @@ from ._base import Config, ConfigError

 class RoomDirectoryConfig(Config):
     def read_config(self, config):
-        alias_creation_rules = config["alias_creation_rules"]
+        alias_creation_rules = config.get("alias_creation_rules")

-        self._alias_creation_rules = [
-            _AliasRule(rule)
-            for rule in alias_creation_rules
-        ]
+        if alias_creation_rules is not None:
+            self._alias_creation_rules = [
+                _RoomDirectoryRule("alias_creation_rules", rule)
+                for rule in alias_creation_rules
+            ]
+        else:
+            self._alias_creation_rules = [
+                _RoomDirectoryRule(
+                    "alias_creation_rules", {
+                        "action": "allow",
+                    }
+                )
+            ]
+
+        room_list_publication_rules = config.get("room_list_publication_rules")
+
+        if room_list_publication_rules is not None:
+            self._room_list_publication_rules = [
+                _RoomDirectoryRule("room_list_publication_rules", rule)
+                for rule in room_list_publication_rules
+            ]
+        else:
+            self._room_list_publication_rules = [
+                _RoomDirectoryRule(
+                    "room_list_publication_rules", {
+                        "action": "allow",
+                    }
+                )
+            ]

     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
@@ -33,60 +58,138 @@ class RoomDirectoryConfig(Config):
         # on this server.
         #
         # The format of this option is a list of rules that contain globs that
-        # match against user_id and the new alias (fully qualified with server
-        # name). The action in the first rule that matches is taken, which can
-        # currently either be "allow" or "deny".
+        # match against user_id, room_id and the new alias (fully qualified with
+        # server name). The action in the first rule that matches is taken,
+        # which can currently either be "allow" or "deny".
         #
-        # If no rules match the request is denied.
-        alias_creation_rules:
-            - user_id: "*"
-              alias: "*"
-              action: allow
+        # Missing user_id/room_id/alias fields default to "*".
+        #
+        # If no rules match the request is denied. An empty list means no one
+        # can create aliases.
+        #
+        # Options for the rules include:
+        #
+        #   user_id: Matches against the creator of the alias
+        #   alias: Matches against the alias being created
+        #   room_id: Matches against the room ID the alias is being pointed at
+        #   action: Whether to "allow" or "deny" the request if the rule matches
+        #
+        # The default is:
+        #
+        #alias_creation_rules:
+        #  - user_id: "*"
+        #    alias: "*"
+        #    room_id: "*"
+        #    action: allow
+
+        # The `room_list_publication_rules` option controls who can publish and
+        # which rooms can be published in the public room list.
+        #
+        # The format of this option is the same as that for
+        # `alias_creation_rules`.
+        #
+        # If the room has one or more aliases associated with it, only one of
+        # the aliases needs to match the alias rule. If there are no aliases
+        # then only rules with `alias: *` match.
+        #
+        # If no rules match the request is denied. An empty list means no one
+        # can publish rooms.
+        #
+        # Options for the rules include:
+        #
+        #   user_id: Matches agaisnt the creator of the alias
+        #   room_id: Matches against the room ID being published
+        #   alias: Matches against any current local or canonical aliases
+        #            associated with the room
+        #   action: Whether to "allow" or "deny" the request if the rule matches
+        #
+        # The default is:
+        #
+        #room_list_publication_rules:
+        #  - user_id: "*"
+        #    alias: "*"
+        #    room_id: "*"
+        #    action: allow
         """
```
```diff
-    def is_alias_creation_allowed(self, user_id, alias):
+    def is_alias_creation_allowed(self, user_id, room_id, alias):
         """Checks if the given user is allowed to create the given alias

         Args:
             user_id (str)
+            room_id (str)
             alias (str)

         Returns:
             boolean: True if user is allowed to crate the alias
         """
         for rule in self._alias_creation_rules:
-            if rule.matches(user_id, alias):
+            if rule.matches(user_id, room_id, [alias]):
                 return rule.action == "allow"

         return False

+    def is_publishing_room_allowed(self, user_id, room_id, aliases):
+        """Checks if the given user is allowed to publish the room
+
+        Args:
+            user_id (str)
+            room_id (str)
+            aliases (list[str]): any local aliases associated with the room
+
+        Returns:
+            boolean: True if user can publish room
+        """
+        for rule in self._room_list_publication_rules:
+            if rule.matches(user_id, room_id, aliases):
+                return rule.action == "allow"
+
+        return False
```
```diff
-class _AliasRule(object):
-    def __init__(self, rule):
+class _RoomDirectoryRule(object):
+    """Helper class to test whether a room directory action is allowed, like
+    creating an alias or publishing a room.
+    """
+
+    def __init__(self, option_name, rule):
         """
         Args:
+            option_name (str): Name of the config option this rule belongs to
             rule (dict): The rule as specified in the config
         """
         action = rule["action"]
-        user_id = rule["user_id"]
-        alias = rule["alias"]
+        user_id = rule.get("user_id", "*")
+        room_id = rule.get("room_id", "*")
+        alias = rule.get("alias", "*")

         if action in ("allow", "deny"):
             self.action = action
         else:
             raise ConfigError(
-                "alias_creation_rules rules can only have action of 'allow'"
-                " or 'deny'"
+                "%s rules can only have action of 'allow'"
+                " or 'deny'" % (option_name,)
             )

+        self._alias_matches_all = alias == "*"
+
         try:
             self._user_id_regex = glob_to_regex(user_id)
             self._alias_regex = glob_to_regex(alias)
+            self._room_id_regex = glob_to_regex(room_id)
         except Exception as e:
             raise ConfigError("Failed to parse glob into regex: %s", e)

-    def matches(self, user_id, alias):
-        """Tests if this rule matches the given user_id and alias.
+    def matches(self, user_id, room_id, aliases):
+        """Tests if this rule matches the given user_id, room_id and aliases.

         Args:
             user_id (str)
-            alias (str)
+            room_id (str)
+            aliases (list[str]): The associated aliases to the room. Will be a
+                single element for testing alias creation, and can be empty for
+                testing room publishing.

         Returns:
             boolean
@@ -96,7 +199,22 @@ class _AliasRule(object):
         if not self._user_id_regex.match(user_id):
             return False

-        if not self._alias_regex.match(alias):
+        if not self._room_id_regex.match(room_id):
             return False

-        return True
+        # We only have alias checks left, so we can short circuit if the alias
+        # rule matches everything.
+        if self._alias_matches_all:
+            return True
+
+        # If we are not given any aliases then this rule only matches if the
+        # alias glob matches all aliases, which we checked above.
+        if not aliases:
+            return False
+
+        # Otherwise, we just need one alias to match
+        for alias in aliases:
+            if self._alias_regex.match(alias):
+                return True
+
+        return False
```
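To make the matching semantics above concrete, here is a hedged, self-contained sketch of how one `_RoomDirectoryRule`-style rule evaluates; `glob_to_regex` below is a stand-in for synapse's helper of the same name, approximated with `fnmatch`:

```python
import fnmatch
import re

def glob_to_regex(glob):
    # Stand-in for synapse.util.glob_to_regex (an assumption here).
    return re.compile(fnmatch.translate(glob))

def rule_matches(rule, user_id, room_id, aliases):
    """Sketch of _RoomDirectoryRule.matches: missing fields default to
    "*", and a single matching alias is enough."""
    if not glob_to_regex(rule.get("user_id", "*")).match(user_id):
        return False
    if not glob_to_regex(rule.get("room_id", "*")).match(room_id):
        return False
    alias_glob = rule.get("alias", "*")
    if alias_glob == "*":
        return True   # alias check short-circuits, as in the diff
    if not aliases:
        return False  # no aliases to test against a narrower glob
    return any(glob_to_regex(alias_glob).match(a) for a in aliases)

rule = {"action": "allow", "user_id": "@admin:*"}
assert rule_matches(rule, "@admin:example.com", "!room:example.com", [])
```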
```diff
@@ -67,44 +67,43 @@ class SAML2Config(Config):
         return """
         # Enable SAML2 for registration and login. Uses pysaml2.
         #
-        # saml2_config:
+        # `sp_config` is the configuration for the pysaml2 Service Provider.
+        # See pysaml2 docs for format of config.
         #
-        #   # The following is the configuration for the pysaml2 Service Provider.
-        #   # See pysaml2 docs for format of config.
-        #   #
-        #   # Default values will be used for the 'entityid' and 'service' settings,
-        #   # so it is not normally necessary to specify them unless you need to
-        #   # override them.
+        # Default values will be used for the 'entityid' and 'service' settings,
+        # so it is not normally necessary to specify them unless you need to
+        # override them.
         #
-        #   sp_config:
-        #     # point this to the IdP's metadata. You can use either a local file or
-        #     # (preferably) a URL.
-        #     metadata:
-        #       # local: ["saml2/idp.xml"]
-        #       remote:
-        #         - url: https://our_idp/metadata.xml
+        #saml2_config:
+        #  sp_config:
+        #    # point this to the IdP's metadata. You can use either a local file or
+        #    # (preferably) a URL.
+        #    metadata:
+        #      #local: ["saml2/idp.xml"]
+        #      remote:
+        #        - url: https://our_idp/metadata.xml
         #
-        #     # The following is just used to generate our metadata xml, and you
-        #     # may well not need it, depending on your setup. Alternatively you
-        #     # may need a whole lot more detail - see the pysaml2 docs!
+        #    # The rest of sp_config is just used to generate our metadata xml, and you
+        #    # may well not need it, depending on your setup. Alternatively you
+        #    # may need a whole lot more detail - see the pysaml2 docs!
         #
-        #     description: ["My awesome SP", "en"]
-        #     name: ["Test SP", "en"]
+        #    description: ["My awesome SP", "en"]
+        #    name: ["Test SP", "en"]
         #
-        #     organization:
-        #       name: Example com
-        #       display_name:
-        #         - ["Example co", "en"]
-        #       url: "http://example.com"
+        #    organization:
+        #      name: Example com
+        #      display_name:
+        #        - ["Example co", "en"]
+        #      url: "http://example.com"
         #
-        #     contact_person:
-        #       - given_name: Bob
-        #         sur_name: "the Sysadmin"
-        #         email_address": ["admin@example.com"]
-        #         contact_type": technical
+        #    contact_person:
+        #      - given_name: Bob
+        #        sur_name: "the Sysadmin"
+        #        email_address": ["admin@example.com"]
+        #        contact_type": technical
         #
-        #   # Instead of putting the config inline as above, you can specify a
-        #   # separate pysaml2 configuration file:
-        #   #
-        #   # config_path: "%(config_dir_path)s/sp_conf.py"
+        #  # Instead of putting the config inline as above, you can specify a
+        #  # separate pysaml2 configuration file:
+        #  #
+        #  config_path: "%(config_dir_path)s/sp_conf.py"
         """ % {"config_dir_path": config_dir_path}
```
```diff
@@ -260,9 +260,11 @@ class ServerConfig(Config):
         # This is used by remote servers to connect to this server,
         # e.g. matrix.org, localhost:8080, etc.
         # This is also the last part of your UserID.
+        #
         server_name: "%(server_name)s"

         # When running as a daemon, the file to store the pid in
+        #
         pid_file: %(pid_file)s

         # CPU affinity mask. Setting this restricts the CPUs on which the
@@ -286,38 +288,51 @@ class ServerConfig(Config):
         #
         # This setting requires the affinity package to be installed!
         #
-        # cpu_affinity: 0xFFFFFFFF
+        #cpu_affinity: 0xFFFFFFFF

         # The path to the web client which will be served at /_matrix/client/
         # if 'webclient' is configured under the 'listeners' configuration.
         #
-        # web_client_location: "/path/to/web/root"
+        #web_client_location: "/path/to/web/root"

         # The public-facing base URL that clients use to access this HS
         # (not including _matrix/...). This is the same URL a user would
         # enter into the 'custom HS URL' field on their client. If you
         # use synapse with a reverse proxy, this should be the URL to reach
         # synapse via the proxy.
-        # public_baseurl: https://example.com/
+        #
+        #public_baseurl: https://example.com/

         # Set the soft limit on the number of file descriptors synapse can use
         # Zero is used to indicate synapse should set the soft limit to the
         # hard limit.
+        #
         soft_file_limit: 0

         # Set to false to disable presence tracking on this homeserver.
+        #
         use_presence: true

         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
-        # gc_thresholds: [700, 10, 10]
+        #
+        #gc_thresholds: [700, 10, 10]

         # Set the limit on the returned events in the timeline in the get
         # and sync operations. The default value is -1, means no upper limit.
-        # filter_timeline_limit: 5000
+        #
+        #filter_timeline_limit: 5000

         # Whether room invites to users on this server should be blocked
         # (except those sent by local server admins). The default is False.
-        # block_non_admin_invites: True
+        #
+        #block_non_admin_invites: True

+        # Room searching
+        #
+        # If disabled, new messages will not be indexed for searching and users
+        # will receive errors when searching for messages. Defaults to enabled.
+        #
+        #enable_search: false

         # Restrict federation to the following whitelist of domains.
         # N.B. we recommend also firewalling your federation listener to limit
@@ -325,7 +340,7 @@ class ServerConfig(Config):
         # purely on this application-layer restriction. If not specified, the
         # default is to whitelist everything.
         #
-        # federation_domain_whitelist:
+        #federation_domain_whitelist:
         # - lon.example.com
         # - nyc.example.com
         # - syd.example.com
```
```diff
@@ -397,11 +412,11 @@ class ServerConfig(Config):
         # will also need to give Synapse a TLS key and certificate: see the TLS section
         # below.)
         #
-        # - port: %(bind_port)s
-        #   type: http
-        #   tls: true
-        #   resources:
-        #     - names: [client, federation]
+        #- port: %(bind_port)s
+        #  type: http
+        #  tls: true
+        #  resources:
+        #    - names: [client, federation]

         # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
         # that unwraps TLS.
@@ -421,52 +436,49 @@ class ServerConfig(Config):

         # example additonal_resources:
         #
-        # additional_resources:
-        #   "/_matrix/my/custom/endpoint":
-        #     module: my_module.CustomRequestHandler
-        #     config: {}
+        #additional_resources:
+        #  "/_matrix/my/custom/endpoint":
+        #    module: my_module.CustomRequestHandler
+        #    config: {}

         # Turn on the twisted ssh manhole service on localhost on the given
         # port.
-        # - port: 9000
-        #   bind_addresses: ['::1', '127.0.0.1']
-        #   type: manhole
+        #
+        #- port: 9000
+        #  bind_addresses: ['::1', '127.0.0.1']
+        #  type: manhole


-        ## Homeserver blocking ##
+        # Homeserver blocking
+        #
         # How to reach the server admin, used in ResourceLimitError
-        # admin_contact: 'mailto:admin@server.com'
-        #
-        # Global block config
-        #
-        # hs_disabled: False
-        # hs_disabled_message: 'Human readable reason for why the HS is blocked'
-        # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
+        #
+        #admin_contact: 'mailto:admin@server.com'

+        # Global blocking
+        #
+        #hs_disabled: False
+        #hs_disabled_message: 'Human readable reason for why the HS is blocked'
+        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'

         # Monthly Active User Blocking
         #
-        # Enables monthly active user checking
-        # limit_usage_by_mau: False
-        # max_mau_value: 50
-        # mau_trial_days: 2
+        #
+        #limit_usage_by_mau: False
+        #max_mau_value: 50
+        #mau_trial_days: 2

         # If enabled, the metrics for the number of monthly active users will
         # be populated, however no one will be limited. If limit_usage_by_mau
         # is true, this is implied to be true.
-        # mau_stats_only: False
+        #
+        #mau_stats_only: False

         # Sometimes the server admin will want to ensure certain accounts are
         # never blocked by mau checking. These accounts are specified here.
         #
-        # mau_limit_reserved_threepids:
-        # - medium: 'email'
-        #   address: 'reserved_user@example.com'
-        #
-        # Room searching
-        #
-        # If disabled, new messages will not be indexed for searching and users
-        # will receive errors when searching for messages. Defaults to enabled.
-        # enable_search: true
+        #mau_limit_reserved_threepids:
+        # - medium: 'email'
+        #   address: 'reserved_user@example.com'
         """ % locals()

     def read_arguments(self, args):
```
```diff
@@ -30,11 +30,11 @@ DEFAULT_CONFIG = """\
 # It's also possible to override the room name, the display name of the
 # "notices" user, and the avatar for the user.
 #
-# server_notices:
-#   system_mxid_localpart: notices
-#   system_mxid_display_name: "Server Notices"
-#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
-#   room_name: "Server Notices"
+#server_notices:
+#  system_mxid_localpart: notices
+#  system_mxid_display_name: "Server Notices"
+#  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
+#  room_name: "Server Notices"
 """

@@ -28,8 +28,8 @@ class SpamCheckerConfig(Config):

     def default_config(self, **kwargs):
         return """\
-        # spam_checker:
-        #     module: "my_custom_project.SuperSpamChecker"
-        #     config:
-        #         example_option: 'things'
+        #spam_checker:
+        #    module: "my_custom_project.SuperSpamChecker"
+        #    config:
+        #        example_option: 'things'
         """
```
```diff
@@ -19,6 +19,8 @@ import warnings
 from datetime import datetime
 from hashlib import sha256

+import six
+
 from unpaddedbase64 import encode_base64

 from OpenSSL import crypto
@@ -36,12 +38,15 @@ class TlsConfig(Config):
             acme_config = {}

         self.acme_enabled = acme_config.get("enabled", False)
-        self.acme_url = acme_config.get(
+
+        # hyperlink complains on py2 if this is not a Unicode
+        self.acme_url = six.text_type(acme_config.get(
             "url", u"https://acme-v01.api.letsencrypt.org/directory"
-        )
+        ))
         self.acme_port = acme_config.get("port", 80)
         self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
         self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
+        self.acme_domain = acme_config.get("domain", config.get("server_name"))

         self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
         self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
@@ -54,7 +59,7 @@ class TlsConfig(Config):
             )
         if not self.tls_private_key_file:
             raise ConfigError(
-                "tls_certificate_path must be specified if TLS-enabled listeners are "
+                "tls_private_key_path must be specified if TLS-enabled listeners are "
                 "configured."
             )

@@ -176,10 +181,11 @@ class TlsConfig(Config):
         # See 'ACME support' below to enable auto-provisioning this certificate via
         # Let's Encrypt.
         #
-        # tls_certificate_path: "%(tls_certificate_path)s"
+        #tls_certificate_path: "%(tls_certificate_path)s"

         # PEM-encoded private key for TLS
-        # tls_private_key_path: "%(tls_private_key_path)s"
+        #
+        #tls_private_key_path: "%(tls_private_key_path)s"

         # ACME support: This will configure Synapse to request a valid TLS certificate
         # for your configured `server_name` via Let's Encrypt.
@@ -206,28 +212,42 @@ class TlsConfig(Config):
         # ACME support is disabled by default. Uncomment the following line
         # (and tls_certificate_path and tls_private_key_path above) to enable it.
         #
-        # enabled: true
+        #enabled: true

         # Endpoint to use to request certificates. If you only want to test,
         # use Let's Encrypt's staging url:
         # https://acme-staging.api.letsencrypt.org/directory
         #
-        # url: https://acme-v01.api.letsencrypt.org/directory
+        #url: https://acme-v01.api.letsencrypt.org/directory

         # Port number to listen on for the HTTP-01 challenge. Change this if
         # you are forwarding connections through Apache/Nginx/etc.
         #
-        # port: 80
+        #port: 80

         # Local addresses to listen on for incoming connections.
         # Again, you may want to change this if you are forwarding connections
         # through Apache/Nginx/etc.
         #
-        # bind_addresses: ['::', '0.0.0.0']
+        #bind_addresses: ['::', '0.0.0.0']

         # How many days remaining on a certificate before it is renewed.
         #
-        # reprovision_threshold: 30
+        #reprovision_threshold: 30

+        # The domain that the certificate should be for. Normally this
+        # should be the same as your Matrix domain (i.e., 'server_name'), but,
+        # by putting a file at 'https://<server_name>/.well-known/matrix/server',
+        # you can delegate incoming traffic to another server. If you do that,
+        # you should give the target of the delegation here.
+        #
+        # For example: if your 'server_name' is 'example.com', but
+        # 'https://example.com/.well-known/matrix/server' delegates to
+        # 'matrix.example.com', you should put 'matrix.example.com' here.
+        #
+        # If not set, defaults to your 'server_name'.
+        #
+        #domain: matrix.example.com

         # List of allowed TLS fingerprints for this server to publish along
         # with the signing keys for this server. Other matrix servers that
@@ -254,8 +274,7 @@ class TlsConfig(Config):
         #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
         # or by checking matrix.org/federationtester/api/report?server_name=$host
         #
-        tls_fingerprints: []
-        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
+        #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]

         """
         % locals()

@@ -40,5 +40,5 @@ class UserDirectoryConfig(Config):
         # on your database to tell it to rebuild the user_directory search indexes.
         #
         #user_directory:
-        #   search_all_users: false
+        #  search_all_users: false
         """
```
```diff
@@ -27,20 +27,24 @@ class VoipConfig(Config):

     def default_config(self, **kwargs):
         return """\
-        ## Turn ##
+        ## TURN ##

         # The public URIs of the TURN server to give to clients
+        #
         #turn_uris: []

         # The shared secret used to compute passwords for the TURN server
+        #
         #turn_shared_secret: "YOUR_SHARED_SECRET"

         # The Username and password if the TURN server needs them and
         # does not use a token
+        #
         #turn_username: "TURNSERVER_USERNAME"
         #turn_password: "TURNSERVER_PASSWORD"

         # How long generated TURN credentials last
+        #
         turn_user_lifetime: "1h"

         # Whether guests should be allowed to use the TURN server.
@@ -48,5 +52,6 @@ class VoipConfig(Config):
         # However, it does introduce a slight security risk as it allows users to
         # connect to arbitrary endpoints without having first signed up for a
         # valid account (e.g. by passing a CAPTCHA).
+        #
         turn_allow_guests: True
         """
```
```diff
@@ -1,4 +1,5 @@
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

 import logging

 from zope.interface import implementer
@@ -105,9 +107,7 @@ class ClientTLSOptions(object):
         self._hostnameBytes = _idnaBytes(hostname)
         self._sendSNI = True

-        ctx.set_info_callback(
-            _tolerateErrors(self._identityVerifyingInfoCallback)
-        )
+        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))

     def clientConnectionForTLS(self, tlsProtocol):
         context = self._ctx
@@ -128,10 +128,8 @@ class ClientTLSOptionsFactory(object):

     def __init__(self, config):
         # We don't use config options yet
-        pass
+        self._options = CertificateOptions(verify=False)

     def get_options(self, host):
-        return ClientTLSOptions(
-            host,
-            CertificateOptions(verify=False).getContext()
-        )
+        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
+        return ClientTLSOptions(host, self._options._makeContext())
```
```diff
@@ -17,6 +17,7 @@
 import logging
 from collections import namedtuple

+from six import raise_from
 from six.moves import urllib

 from signedjson.key import (
@@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64

 from twisted.internet import defer

-from synapse.api.errors import Codes, SynapseError
+from synapse.api.errors import (
+    Codes,
+    HttpResponseException,
+    RequestSendFailed,
+    SynapseError,
+)
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
     LoggingContext,
@@ -44,6 +50,7 @@ from synapse.util.logcontext import (
     run_in_background,
 )
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination

 logger = logging.getLogger(__name__)

@@ -367,13 +374,18 @@ class Keyring(object):
                     server_name_and_key_ids, perspective_name, perspective_keys
                 )
                 defer.returnValue(result)
+            except KeyLookupError as e:
+                logger.warning(
+                    "Key lookup failed from %r: %s", perspective_name, e,
+                )
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
                     type(e).__name__, str(e),
                 )
-                defer.returnValue({})

             defer.returnValue({})
```
```diff
         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
@@ -421,21 +433,30 @@ class Keyring(object):
         # TODO(mark): Set the minimum_valid_until_ts to that needed by
         # the events being validated or the current time if validating
         # an incoming request.
-        query_response = yield self.client.post_json(
-            destination=perspective_name,
-            path="/_matrix/key/v2/query",
-            data={
-                u"server_keys": {
-                    server_name: {
-                        key_id: {
-                            u"minimum_valid_until_ts": 0
-                        } for key_id in key_ids
+        try:
+            query_response = yield self.client.post_json(
+                destination=perspective_name,
+                path="/_matrix/key/v2/query",
+                data={
+                    u"server_keys": {
+                        server_name: {
+                            key_id: {
+                                u"minimum_valid_until_ts": 0
+                            } for key_id in key_ids
+                        }
+                        for server_name, key_ids in server_names_and_key_ids
                     }
-                    for server_name, key_ids in server_names_and_key_ids
-                }
-            },
-            long_retries=True,
-        )
+                },
+                long_retries=True,
+            )
+        except (NotRetryingDestination, RequestSendFailed) as e:
+            raise_from(
+                KeyLookupError("Failed to connect to remote server"), e,
+            )
+        except HttpResponseException as e:
+            raise_from(
+                KeyLookupError("Remote server returned an error"), e,
+            )

         keys = {}

@@ -502,11 +523,20 @@ class Keyring(object):
             if requested_key_id in keys:
                 continue

-            response = yield self.client.get_json(
-                destination=server_name,
-                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
-                ignore_backoff=True,
-            )
+            try:
+                response = yield self.client.get_json(
+                    destination=server_name,
+                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    ignore_backoff=True,
+                )
+            except (NotRetryingDestination, RequestSendFailed) as e:
+                raise_from(
+                    KeyLookupError("Failed to connect to remote server"), e,
+                )
+            except HttpResponseException as e:
+                raise_from(
+                    KeyLookupError("Remote server returned an error"), e,
+                )

             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):
@@ -656,7 +686,7 @@ def _handle_key_deferred(verify_request):
     try:
         with PreserveLoggingContext():
             _, key_id, verify_key = yield verify_request.deferred
-    except IOError as e:
+    except (IOError, RequestSendFailed) as e:
         logger.warn(
             "Got IOError when downloading keys for %s: %s %s",
             server_name, type(e).__name__, str(e),
```
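The recurring pattern in the keyring hunks above is translating transport-level failures into `KeyLookupError` while preserving the original exception as the cause via `six.raise_from`. A minimal sketch with stand-in exception classes (the real ones live in `synapse.api.errors`):

```python
from six import raise_from

class KeyLookupError(Exception):
    pass

class RequestSendFailed(Exception):
    # Stand-in for synapse.api.errors.RequestSendFailed.
    pass

def fetch_with_translation(do_request):
    """Sketch of the error translation above: connection failures become
    KeyLookupError, chained to the underlying cause."""
    try:
        return do_request()
    except RequestSendFailed as e:
        raise_from(KeyLookupError("Failed to connect to remote server"), e)

def failing_request():
    raise RequestSendFailed("connection refused")

try:
    fetch_with_translation(failing_request)
except KeyLookupError as e:
    print("translated:", repr(e))
```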
```diff
@@ -33,6 +33,7 @@ from synapse.api.constants import (
 )
 from synapse.api.errors import (
     CodeMessageException,
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
@@ -792,10 +793,25 @@ class FederationClient(FederationBase):
             defer.returnValue(content)
         except HttpResponseException as e:
             if e.code in [400, 404]:
+                err = e.to_synapse_error()
+
+                # If we receive an error response that isn't a generic error, we
+                # assume that the remote understands the v2 invite API and this
+                # is a legitimate error.
+                if err.errcode != Codes.UNKNOWN:
+                    raise err
+
+                # Otherwise, we assume that the remote server doesn't understand
+                # the v2 invite API.
+
                 if room_version in (RoomVersions.V1, RoomVersions.V2):
                     pass  # We'll fall through
                 else:
-                    raise Exception("Remote server is too old")
+                    raise SynapseError(
+                        400,
+                        "User's homeserver does not support this room version",
+                        Codes.UNSUPPORTED_ROOM_VERSION,
+                    )
             elif e.code == 403:
                 raise e.to_synapse_error()
             else:
```
@@ -25,9 +25,10 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    FederationError,
    IncompatibleRoomVersionError,
    NotFoundError,

@@ -239,8 +240,9 @@ class FederationServer(FederationBase):
                f = failure.Failure()
                pdu_results[event_id] = {"error": str(e)}
                logger.error(
-                    "Failed to handle PDU %s: %s",
-                    event_id, f.getTraceback().rstrip(),
+                    "Failed to handle PDU %s",
+                    event_id,
+                    exc_info=(f.type, f.value, f.getTracebackObject()),
                )

        yield concurrently_execute(

@@ -386,6 +388,13 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content, room_version):
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(content, format_ver)

@@ -877,6 +886,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
    def on_edu(self, edu_type, origin, content):
        """Overrides FederationHandlerRegistry
        """
+        if not self.config.use_presence and edu_type == "m.presence":
+            return
+
        handler = self.edu_handlers.get(edu_type)
        if handler:
            return super(ReplicationFederationHandlerRegistry, self).on_edu(
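The logging change above swaps a pre-rendered traceback string for the stdlib `exc_info` tuple, which lets the logging framework format (and filter) the traceback itself. A small sketch of the same idiom with a Twisted `Failure` (the `"$event_id"` value is a placeholder):

```python
import logging

from twisted.python import failure

logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    f = failure.Failure()  # captures the exception currently being handled
    # exc_info hands the raw (type, value, traceback) to logging, rather than
    # baking a string into the message as f.getTraceback() would.
    logger.error(
        "Failed to handle PDU %s",
        "$event_id",
        exc_info=(f.type, f.value, f.getTracebackObject()),
    )
```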
@@ -159,8 +159,12 @@ class FederationRemoteSendQueue(object):
        # stream.
        pass

-    def send_edu(self, destination, edu_type, content, key=None):
+    def build_and_send_edu(self, destination, edu_type, content, key=None):
        """As per TransactionQueue"""
+        if destination == self.server_name:
+            logger.info("Not sending EDU to ourselves")
+            return
+
        pos = self._next_pos()

        edu = Edu(

@@ -465,15 +469,11 @@ def process_rows_for_federation(transaction_queue, rows):

    for destination, edu_map in iteritems(buff.keyed_edus):
        for key, edu in edu_map.items():
-            transaction_queue.send_edu(
-                edu.destination, edu.edu_type, edu.content, key=key,
-            )
+            transaction_queue.send_edu(edu, key)

    for destination, edu_list in iteritems(buff.edus):
        for edu in edu_list:
-            transaction_queue.send_edu(
-                edu.destination, edu.edu_type, edu.content, key=None,
-            )
+            transaction_queue.send_edu(edu, None)

    for destination in buff.device_destinations:
        transaction_queue.send_device_messages(destination)
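The `key` argument implements clobbering: EDUs queued under the same `(edu_type, key)` replace any not-yet-sent predecessor, so, for example, a burst of presence updates for one user collapses to just the latest. A minimal sketch of that queueing rule (the `EduQueue` class and dict-shaped EDUs are illustrative, not synapse's types):

```python
class EduQueue(object):
    """Illustrative queue with the same clobbering rule as the hunk above."""

    def __init__(self):
        self.keyed = {}    # destination -> {(edu_type, key): edu}
        self.unkeyed = {}  # destination -> [edu, ...]

    def send_edu(self, edu, key):
        if key:
            # Later EDUs with the same (type, key) overwrite earlier ones.
            self.keyed.setdefault(edu["destination"], {})[
                (edu["edu_type"], key)
            ] = edu
        else:
            # Unkeyed EDUs are never dropped; they queue up in order.
            self.unkeyed.setdefault(edu["destination"], []).append(edu)


q = EduQueue()
q.send_edu({"destination": "b.example", "edu_type": "m.presence"}, key="@u:a")
q.send_edu({"destination": "b.example", "edu_type": "m.presence"}, key="@u:a")
assert len(q.keyed["b.example"]) == 1  # second update clobbered the first
```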
@@ -33,7 +33,6 @@ from synapse.metrics import (
    event_processing_loop_counter,
    event_processing_loop_room_count,
    events_processed_counter,
-    sent_edus_counter,
    sent_transactions_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process

@@ -47,10 +46,24 @@ from .units import Edu, Transaction
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
-    "synapse_federation_client_sent_pdu_destinations:count", ""
+    "synapse_federation_client_sent_pdu_destinations:count",
+    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
-    "synapse_federation_client_sent_pdu_destinations:total", ""
+    "synapse_federation_client_sent_pdu_destinations:total",
+    "Total number of PDUs queued for sending across all destinations",
)

+sent_edus_counter = Counter(
+    "synapse_federation_client_sent_edus",
+    "Total number of EDUs successfully sent",
+)
+
+sent_edus_by_type = Counter(
+    "synapse_federation_client_sent_edus_by_type",
+    "Number of sent EDUs successfully sent, by event type",
+    ["type"],
+)

@@ -348,7 +361,19 @@ class TransactionQueue(object):

        self._attempt_new_transaction(destination)

-    def send_edu(self, destination, edu_type, content, key=None):
+    def build_and_send_edu(self, destination, edu_type, content, key=None):
+        """Construct an Edu object, and queue it for sending
+
+        Args:
+            destination (str): name of server to send to
+            edu_type (str): type of EDU to send
+            content (dict): content of EDU
+            key (Any|None): clobbering key for this edu
+        """
+        if destination == self.server_name:
+            logger.info("Not sending EDU to ourselves")
+            return
+
        edu = Edu(
            origin=self.server_name,
            destination=destination,

@@ -356,20 +381,23 @@ class TransactionQueue(object):
            content=content,
        )

-        if destination == self.server_name:
-            logger.info("Not sending EDU to ourselves")
-            return
+        self.send_edu(edu, key)

-        sent_edus_counter.inc()
+    def send_edu(self, edu, key):
+        """Queue an EDU for sending
+
+        Args:
+            edu (Edu): edu to send
+            key (Any|None): clobbering key for this edu
+        """
        if key:
            self.pending_edus_keyed_by_dest.setdefault(
-                destination, {}
+                edu.destination, {}
            )[(edu.edu_type, key)] = edu
        else:
-            self.pending_edus_by_dest.setdefault(destination, []).append(edu)
+            self.pending_edus_by_dest.setdefault(edu.destination, []).append(edu)

-        self._attempt_new_transaction(destination)
+        self._attempt_new_transaction(edu.destination)

    def send_device_messages(self, destination):
        if destination == self.server_name:

@@ -496,6 +524,9 @@ class TransactionQueue(object):
            )
            if success:
                sent_transactions_counter.inc()
+                sent_edus_counter.inc(len(pending_edus))
+                for edu in pending_edus:
+                    sent_edus_by_type.labels(edu.edu_type).inc()
                # Remove the acknowledged device messages from the database
                # Only bother if we actually sent some device messages
                if device_message_edus:
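The new counters follow the standard `prometheus_client` pattern: a metric name, a help string (the diff replaces the previous empty `""` docs), and an optional label set for per-dimension breakdowns. A quick sketch of the same pattern, assuming `prometheus_client` is installed (metric names here are examples, not synapse's):

```python
from prometheus_client import Counter

sent_edus_counter = Counter(
    "example_sent_edus",
    "Total number of EDUs successfully sent",
)

sent_edus_by_type = Counter(
    "example_sent_edus_by_type",
    "Number of EDUs successfully sent, by EDU type",
    ["type"],
)

# On each successful transaction, bump the totals, as in the hunk above.
pending_edus = [{"edu_type": "m.presence"}, {"edu_type": "m.typing"}]
sent_edus_counter.inc(len(pending_edus))
for edu in pending_edus:
    sent_edus_by_type.labels(edu["edu_type"]).inc()
```

Incrementing only after `success` means the counters measure EDUs actually delivered, not merely queued, which is why the old unconditional `sent_edus_counter.inc()` at enqueue time was removed.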
@@ -393,7 +393,7 @@ class FederationStateServlet(BaseFederationServlet):
        return self.handler.on_context_state_request(
            origin,
            context,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
        )

@@ -404,7 +404,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
        return self.handler.on_state_ids_request(
            origin,
            room_id,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
        )

@@ -736,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):

        data = yield self.handler.get_local_public_room_list(
            limit, since_token,
-            network_tuple=network_tuple,
+            network_tuple=network_tuple,
+            from_federation=True,
        )
        defer.returnValue((200, data))
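The `required=True` change turns a missing `event_id` query parameter into an immediate client error instead of passing `None` down into the handler. A sketch of what such a helper plausibly does; this is an illustration under assumed semantics, not synapse's actual implementation:

```python
class SynapseError(Exception):
    """Illustrative stand-in carrying an HTTP status code."""

    def __init__(self, code, msg):
        super(SynapseError, self).__init__(msg)
        self.code = code


def parse_string_from_args(args, name, default=None, required=False):
    """Illustrative parser: query args map names to lists of values."""
    if name in args and args[name]:
        return args[name][0]
    if required:
        # Reject the request up front rather than handing None downstream.
        raise SynapseError(400, "Missing required query parameter %r" % (name,))
    return default


assert parse_string_from_args({"event_id": ["$abc"]}, "event_id") == "$abc"
```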
@@ -42,7 +42,7 @@ from signedjson.sign import sign_json

from twisted.internet import defer

-from synapse.api.errors import SynapseError
+from synapse.api.errors import RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background

@@ -191,6 +191,11 @@ class GroupAttestionRenewer(object):
                yield self.store.update_attestation_renewal(
                    group_id, user_id, attestation
                )
+            except RequestSendFailed as e:
+                logger.warning(
+                    "Failed to renew attestation of %r in %r: %s",
+                    user_id, group_id, e,
+                )
            except Exception:
                logger.exception("Error renewing attestation of %r in %r",
                                 user_id, group_id)
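Ordering matters here: the narrow `except RequestSendFailed` must come before the catch-all `except Exception`, so that expected, transient network failures log a one-line warning while anything unexpected still gets a full traceback. A minimal sketch (the `RequestSendFailed` class and IDs below are stand-ins for illustration):

```python
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)


class RequestSendFailed(Exception):
    """Stand-in for synapse's RequestSendFailed."""


def renew(do_renew, user_id, group_id):
    try:
        do_renew()
    except RequestSendFailed as e:
        # Expected and transient: keep the log quiet.
        logger.warning(
            "Failed to renew attestation of %r in %r: %s", user_id, group_id, e,
        )
    except Exception:
        # Unexpected: log with the full traceback.
        logger.exception("Error renewing attestation of %r in %r", user_id, group_id)


def flaky():
    raise RequestSendFailed("connection timed out")


renew(flaky, "@user:example.org", "+group:example.org")
```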
@@ -113,8 +113,7 @@ class GroupsServerHandler(object):
            room_id = room_entry["room_id"]
            joined_users = yield self.store.get_users_in_room(room_id)
            entry = yield self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users),
-                with_alias=False, allow_private=True,
+                room_id, len(joined_users), with_alias=False, allow_private=True,
            )
            entry = dict(entry)  # so we don't change whats cached
            entry.pop("room_id", None)

@@ -544,8 +543,7 @@ class GroupsServerHandler(object):

        joined_users = yield self.store.get_users_in_room(room_id)
        entry = yield self.room_list_handler.generate_room_entry(
-            room_id, len(joined_users),
-            with_alias=False, allow_private=True,
+            room_id, len(joined_users), with_alias=False, allow_private=True,
        )

        if not entry:
@@ -17,7 +17,6 @@ from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
-from .register import RegistrationHandler
from .search import SearchHandler

@@ -41,7 +40,6 @@ class Handlers(object):
    """

    def __init__(self, hs):
-        self.registration_handler = RegistrationHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)

@@ -167,4 +167,4 @@ class BaseHandler(object):
                ratelimit=False,
            )
        except Exception as e:
-            logger.warn("Error kicking guest user: %s" % (e,))
+            logger.exception("Error kicking guest user: %s" % (e,))
@@ -56,6 +56,7 @@ class AcmeHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.reactor = hs.get_reactor()
+        self._acme_domain = hs.config.acme_domain

    @defer.inlineCallbacks
    def start_listening(self):

@@ -123,15 +124,15 @@ class AcmeHandler(object):
    @defer.inlineCallbacks
    def provision_certificate(self):

-        logger.warning("Reprovisioning %s", self.hs.hostname)
+        logger.warning("Reprovisioning %s", self._acme_domain)

        try:
-            yield self._issuer.issue_cert(self.hs.hostname)
+            yield self._issuer.issue_cert(self._acme_domain)
        except Exception:
            logger.exception("Fail!")
            raise
-        logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
-        cert_chain = self._store.certs[self.hs.hostname]
+        logger.warning("Reprovisioned %s, saving.", self._acme_domain)
+        cert_chain = self._store.certs[self._acme_domain]

        try:
            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:
@@ -20,7 +20,11 @@ from twisted.internet import defer

from synapse.api import errors
from synapse.api.constants import EventTypes
-from synapse.api.errors import FederationDeniedError
+from synapse.api.errors import (
+    FederationDeniedError,
+    HttpResponseException,
+    RequestSendFailed,
+)
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer

@@ -504,13 +508,13 @@ class DeviceListEduUpdater(object):
            origin = get_domain_from_id(user_id)
            try:
                result = yield self.federation.query_user_devices(origin, user_id)
-            except NotRetryingDestination:
+            except (
+                NotRetryingDestination, RequestSendFailed, HttpResponseException,
+            ):
                # TODO: Remember that we are now out of sync and try again
                # later
                logger.warn(
-                    "Failed to handle device list update for %s,"
-                    " we're not retrying the remote",
-                    user_id,
+                    "Failed to handle device list update for %s", user_id,
                )
                # We abort on exceptions rather than accepting the update
                # as otherwise synapse will 'forget' that its device list
@@ -112,7 +112,9 @@ class DirectoryHandler(BaseHandler):
                403, "This user is not permitted to create this alias",
            )

-        if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+        if not self.config.is_alias_creation_allowed(
+            user_id, room_id, room_alias.to_string(),
+        ):
            # Lets just return a generic message, as there may be all sorts of
            # reasons why we said no. TODO: Allow configurable error messages
            # per alias creation rule?

@@ -395,9 +397,9 @@ class DirectoryHandler(BaseHandler):
            room_id (str)
            visibility (str): "public" or "private"
        """
-        if not self.spam_checker.user_may_publish_room(
-            requester.user.to_string(), room_id
-        ):
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_publish_room(user_id, room_id):
            raise AuthError(
                403,
                "This user is not permitted to publish rooms to the room list"

@@ -415,7 +417,24 @@ class DirectoryHandler(BaseHandler):

        yield self.auth.check_can_change_room_list(room_id, requester.user)

-        yield self.store.set_room_is_public(room_id, visibility == "public")
+        making_public = visibility == "public"
+        if making_public:
+            room_aliases = yield self.store.get_aliases_for_room(room_id)
+            canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
+            if canonical_alias:
+                room_aliases.append(canonical_alias)
+
+            if not self.config.is_publishing_room_allowed(
+                user_id, room_id, room_aliases,
+            ):
+                # Lets just return a generic message, as there may be all sorts of
+                # reasons why we said no. TODO: Allow configurable error messages
+                # per alias creation rule?
+                raise SynapseError(
+                    403, "Not allowed to publish room",
+                )
+
+        yield self.store.set_room_is_public(room_id, making_public)

    @defer.inlineCallbacks
    def edit_published_appservice_room_list(self, appservice_id, network_id,
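The new code gathers every alias for the room (including the canonical one) before consulting the configured publishing rules, so a rule can match on any alias the room carries. A sketch of one plausible shape for such a rule check, using glob patterns; this is purely illustrative and not synapse's config implementation:

```python
from fnmatch import fnmatch


def is_publishing_room_allowed(user_id, room_id, aliases, rules):
    """Illustrative rule check: each rule is (alias_glob, allowed)."""
    for alias in aliases:
        for glob, allowed in rules:
            if fnmatch(alias, glob):
                # First matching rule wins for this alias.
                return allowed
    return True  # no rule matched: default-allow


rules = [("#staging-*:example.org", False)]
assert not is_publishing_room_allowed(
    "@u:example.org", "!r:example.org", ["#staging-1:example.org"], rules,
)
```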
@@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
            set(auth_events.keys()) | set(state_events.keys())
        )

+        # We now have a chunk of events plus associated state and auth chain to
+        # persist. We do the persistence in two steps:
+        #  1. Auth events and state get persisted as outliers, plus the
+        #     backward extremities get persisted (as non-outliers).
+        #  2. The rest of the events in the chunk get persisted one by one, as
+        #     each one depends on the previous event for its state.
+        #
+        # The important thing is that events in the chunk get persisted as
+        # non-outliers, including when those events are also in the state or
+        # auth chain. Caution must therefore be taken to ensure that they are
+        # not accidentally marked as outliers.
+
+        # Step 1a: persist auth events that *don't* appear in the chunk
        ev_infos = []
        for a in auth_events.values():
-            if a.event_id in seen_events:
+            # We only want to persist auth events as outliers that we haven't
+            # seen and aren't about to persist as part of the backfilled chunk.
+            if a.event_id in seen_events or a.event_id in event_map:
                continue

            a.internal_metadata.outlier = True
            ev_infos.append({
                "event": a,

@@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
                }
            })

+        # Step 1b: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities) as non-outliers.
        for e_id in events_to_state:
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            ev = event_map[e_id]
+            assert(not ev.internal_metadata.is_outlier())
+
            ev_infos.append({
-                "event": event_map[e_id],
+                "event": ev,
                "state": events_to_state[e_id],
                "auth_events": {
                    (auth_events[a_id].type, auth_events[a_id].state_key):
                    auth_events[a_id]
-                    for a_id in event_map[e_id].auth_event_ids()
+                    for a_id in ev.auth_event_ids()
                    if a_id in auth_events
                }
            })

@@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
            backfilled=True,
        )

+        # Step 2: Persist the rest of the events in the chunk one by one
        events.sort(key=lambda e: e.depth)

        for event in events:
            if event in events_to_state:
                continue

+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            assert(not event.internal_metadata.is_outlier())
+
            # We store these one at a time since each event depends on the
            # previous to work out the state.
            # TODO: We can probably do something more clever here.
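The comments added in the hunk above describe the ordering invariant; step 2 amounts to sorting the remaining chunk by depth and persisting one event at a time, so that each event's predecessors are (as far as possible) already persisted when its state is computed. A self-contained schematic of that step, with a toy `Event` type rather than synapse's event objects:

```python
import collections

Event = collections.namedtuple("Event", ["event_id", "depth"])


def persist_chunk(persist_event, events, backward_extremity_ids):
    """Schematic of step 2: the backwards extremities were already
    persisted (with fetched state) in step 1, so skip them here."""
    events.sort(key=lambda e: e.depth)
    for event in events:
        if event.event_id in backward_extremity_ids:
            continue
        # One at a time, shallowest first: each event depends on its
        # predecessors to work out its state.
        persist_event(event)


persist_chunk(print, [Event("$b", 2), Event("$a", 1)], set())
```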
@@ -20,7 +20,7 @@ from six import iteritems

from twisted.internet import defer

-from synapse.api.errors import HttpResponseException, SynapseError
+from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import get_domain_from_id

logger = logging.getLogger(__name__)

@@ -46,13 +46,19 @@ def _create_rerouter(func_name):
            # when the remote end responds with things like 403 Not
            # In Group, we can communicate that to the client instead
            # of a 500.
-            def h(failure):
+            def http_response_errback(failure):
                failure.trap(HttpResponseException)
                e = failure.value
                if e.code == 403:
                    raise e.to_synapse_error()
                return failure
-            d.addErrback(h)

+            def request_failed_errback(failure):
+                failure.trap(RequestSendFailed)
+                raise SynapseError(502, "Failed to contact group server")
+
+            d.addErrback(http_response_errback)
+            d.addErrback(request_failed_errback)
            return d
        return f
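Each errback here handles exactly one failure type: `failure.trap()` re-raises the failure unchanged if it doesn't match, letting it fall through to the next errback in the chain. A runnable sketch of the pattern, with stand-in exception classes (illustrative, not synapse's):

```python
from twisted.internet import defer
from twisted.python import failure


class HttpResponseException(Exception):
    def __init__(self, code):
        super(HttpResponseException, self).__init__(code)
        self.code = code


class RequestSendFailed(Exception):
    pass


def http_response_errback(f):
    f.trap(HttpResponseException)  # anything else propagates unchanged
    if f.value.code == 403:
        raise RuntimeError("forbidden by remote")
    return f  # unhandled: pass the failure along the errback chain


def request_failed_errback(f):
    f.trap(RequestSendFailed)
    raise RuntimeError("Failed to contact group server")


d = defer.fail(failure.Failure(RequestSendFailed()))
d.addErrback(http_response_errback)   # traps only HttpResponseException
d.addErrback(request_failed_errback)  # traps only RequestSendFailed
d.addErrback(lambda f: print("final error:", f.value))
```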
@@ -436,10 +436,11 @@ class EventCreationHandler(object):

        if event.is_state():
            prev_state = yield self.deduplicate_state_event(event, context)
-            logger.info(
-                "Not bothering to persist duplicate state event %s", event.event_id,
-            )
            if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist state event %s duplicated by %s",
+                    event.event_id, prev_state.event_id,
+                )
                defer.returnValue(prev_state)

        yield self.handle_new_client_event(
@@ -136,7 +136,11 @@ class PaginationHandler(object):
            logger.info("[purge] complete")
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
        except Exception:
-            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+            f = Failure()
+            logger.error(
+                "[purge] failed",
+                exc_info=(f.type, f.value, f.getTracebackObject()),
+            )
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
        finally:
            self._purges_in_progress_by_room.discard(room_id)

@@ -254,7 +258,7 @@ class PaginationHandler(object):
            })

        state = None
-        if event_filter and event_filter.lazy_load_members():
+        if event_filter and event_filter.lazy_load_members() and len(events) > 0:
            # TODO: remove redundant members

            # FIXME: we also care about invite targets etc.
Some files were not shown because too many files have changed in this diff.